import os
import sys
from collections import defaultdict

import cv2
from torchvision import transforms

sys.path.append("/home/yangchen/projects/other_experments/di-fusion")
import argparse
import copy
import functools
import logging
import threading
from pathlib import Path

import network.utility as net_util
import open3d as o3d
import system.tracker
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataset.production import *
from fairnr.clib import (aabb_ray_intersect, inverse_cdf_sampling,
                         svo_ray_intersect, triangle_ray_intersect,
                         uniform_ray_sampling)
from network.loss import SRNLossCriterion
from network.radiance_field import RaidanceField
from network.renderer import VolumeRenderer
from pyquaternion import Quaternion
from trainer import make_encoder
from utils import data_utils, exp_util, motion_util, vis_util

MAX_DEPTH = 10000.0  # sentinel depth assigned to ray/voxel intersections that miss (see ray_intersect / ray_sample)
TINY = 1e-9  # numerical epsilon to keep torch.log(probs + TINY) finite in sample_pixels

class OptimResultsSet:
    """Holder for one round of latent-vector optimization results.

    All fields start as ``None`` and are populated externally by the
    optimizer; :meth:`clear` resets the holder for reuse.  (Field semantics
    inferred from their names — confirm against the optimizer that fills
    them.)
    """

    def __init__(self):
        # Define the empty state in exactly one place (clear) instead of
        # duplicating the field list here.
        self.clear()

    def clear(self):
        """Reset every result field to its empty (``None``) state."""
        self.latent_ids = None
        self.new_latent_vecs = None
        self.old_latent_vecs = None
        self.old_latent_obs_counts = None

class MeshExtractCache:
    """Cache of mesh-extraction intermediates plus the ids of latent vectors
    updated since the last extraction (presumably to allow incremental
    re-extraction — semantics inferred from field names, TODO confirm).
    """

    def __init__(self, device):
        """:param device: torch device on which the id tensor is allocated."""
        self.device = device
        # Define the empty state in one place (clear_all) instead of
        # duplicating the field resets here.
        self.clear_all()

    def clear_updated_vec(self):
        """Reset the updated-vector id list to an empty ``long`` tensor."""
        self.updated_vec_id = torch.empty((0, ), device=self.device, dtype=torch.long)

    def clear_all(self):
        """Drop all cached mesh data and reset the updated-vector ids."""
        self.vertices = None
        self.vertices_flatten_id = None
        self.vertices_std = None
        # No need to pre-assign updated_vec_id = None: clear_updated_vec
        # overwrites it immediately.
        self.clear_updated_vec()

class ICLNUIMSequence(RGBDSequence):
    # Iterator over an ICL-NUIM RGB-D sequence.  Relies on the RGBDSequence
    # base class to provide `self.frame_id` and `self.device` — TODO confirm.
    def __init__(self, path: str, start_frame: int = 0, end_frame: int = -1, first_tq: list = None, load_gt: bool = False):
        """Index the color/depth frames under `path` and optionally load GT poses.

        :param path:        dataset root containing "rgb/" and "depth/" folders.
        :param start_frame: first frame to use (inclusive).
        :param end_frame:   one-past-last frame; -1 means "until the end".
        :param first_tq:    optional initial pose: translation in [:3], quaternion
                            array in [3:] (pyquaternion order, presumably
                            [w, x, y, z] — TODO confirm).
        :param load_gt:     if True, parse a *.freiburg / groundtruth.txt trajectory.
        """
        super().__init__()
        self.path = Path(path)
        # Sort color frames numerically by the integer id in "rgb/<id>.<ext>".
        self.color_names = sorted([f"rgb/{t}" for t in os.listdir(self.path / "rgb")], key=lambda t: int(t[4:].split(".")[0]))
        self.depth_names = [f"depth/{t}.png" for t in range(len(self.color_names))]
        # Intrinsics: fx, fy, cx, cy, depth scale (raw depth units per meter).
        self.calib = [481.2, 480.0, 319.50, 239.50, 5000.0]

        if first_tq is not None:
            self.first_iso = motion_util.Isometry(q=Quaternion(array=first_tq[3:]), t=np.array(first_tq[:3]))
        else:
            # Default initial orientation: pure-imaginary quaternion (a 180-degree rotation).
            self.first_iso = motion_util.Isometry(q=Quaternion(array=[0.0, -1.0, 0.0, 0.0]))

        if end_frame == -1:
            end_frame = len(self.color_names)

        self.color_names = self.color_names[start_frame:end_frame]
        self.depth_names = self.depth_names[start_frame:end_frame]

        if load_gt:
            gt_traj_path = (list(self.path.glob("*.freiburg")) + list(self.path.glob("groundtruth.txt")))[0]
            self.gt_trajectory = self._parse_traj_file(gt_traj_path)
            self.gt_trajectory = self.gt_trajectory[start_frame:end_frame]
            # Re-anchor the trajectory so its first pose equals first_iso.
            change_iso = self.first_iso.dot(self.gt_trajectory[0].inv())
            self.gt_trajectory = [change_iso.dot(t) for t in self.gt_trajectory]
            assert len(self.gt_trajectory) == len(self.color_names)
        else:
            self.gt_trajectory = None

    def _parse_traj_file(self, traj_path):
        """Parse a trajectory file into a list of Isometry poses.

        Rows are read as [timestamp, tx, ty, tz, qx, qy, qz, qw]
        (TUM-style — translation from columns 1:4, imaginary quaternion part
        from columns 4:7, real part from the last column).
        """
        camera_ext = {}
        traj_data = np.genfromtxt(traj_path)
        # Canonical frame change: 180-degree rotation about the z axis.
        cano_quat = motion_util.Isometry(q=Quaternion(axis=[0.0, 0.0, 1.0], degrees=180.0))
        for cur_p in traj_data:
            cur_q = Quaternion(imaginary=cur_p[4:7], real=cur_p[-1]).rotation_matrix
            cur_t = cur_p[1:4]
            # Flip the y axis of both rotation and translation (coordinate-handedness change).
            cur_q[1] = -cur_q[1]
            cur_q[:, 1] = -cur_q[:, 1]
            cur_t[1] = -cur_t[1]
            cur_iso = motion_util.Isometry(q=Quaternion(matrix=cur_q), t=cur_t)
            camera_ext[cur_p[0]] = cano_quat.dot(cur_iso)
        # Timestamps are assumed to start at 1; synthesize a pose for index 0 — TODO confirm.
        camera_ext[0] = camera_ext[1]
        return [camera_ext[t] for t in range(len(camera_ext))]

    def __len__(self):
        # Number of frames after the start/end slicing.
        return len(self.color_names)

    def __next__(self):
        """Load and return the next FrameData (depth in meters, RGB in [0, 1])."""
        if self.frame_id >= len(self):
            raise StopIteration

        depth_img_path = self.path / self.depth_names[self.frame_id]
        rgb_img_path = self.path / self.color_names[self.frame_id]

        # Read raw depth and convert to meters using the depth scale.
        depth_data = cv2.imread(str(depth_img_path), cv2.IMREAD_UNCHANGED)
        depth_data = torch.from_numpy(depth_data.astype(np.float32)).to(self.device) / self.calib[4]
        rgb_data = cv2.imread(str(rgb_img_path))
        rgb_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2RGB)
        rgb_data = torch.from_numpy(rgb_data).to(self.device).float() / 255.

        frame_data = FrameData()
        frame_data.gt_pose = self.gt_trajectory[self.frame_id] if self.gt_trajectory is not None else None
        frame_data.calib = FrameIntrinsic(self.calib[0], self.calib[1], self.calib[2], self.calib[3], self.calib[4])
        frame_data.depth = depth_data
        frame_data.rgb = rgb_data

        self.frame_id += 1
        return frame_data

class ScannetSequence(RGBDSequence):
    """RGB-D sequence that also owns the full mapping / neural-rendering
    pipeline: SDF tracker, latent voxel map, image encoder, volume renderer
    and loss criterion.

    NOTE(review): despite the name, the active paths/intrinsics below are
    ICL-NUIM style ("rgb/", "depth/", *.freiburg ground truth); the
    ScanNet-specific branch is disabled with `if False`.
    """

    def __init__(self, path: str, args: argparse.Namespace, start_frame: int = 0, end_frame: int = -1, 
                            first_tq: list = None, load_gt: bool = False, device = None, training=True):
        """Index the frames under `path`, optionally load GT poses, and build
        the mapping/rendering sub-modules.

        :param path:        dataset root containing "rgb/" and "depth/" folders.
        :param args:        config namespace (mapping, tracking, renderer options).
        :param start_frame: first frame to use (inclusive).
        :param end_frame:   one-past-last frame; -1 means "until the end".
        :param first_tq:    optional initial pose: translation in [:3], quaternion
                            array in [3:] (pyquaternion order, presumably
                            [w, x, y, z] — TODO confirm).
        :param load_gt:     if True, parse a *.freiburg / groundtruth.txt trajectory.
        :param device:      torch device for tensors and sub-modules.
        :param training:    affects point sampling in `ray_sample`.
        """
        super().__init__()
        self.args = args
        self.path = Path(path)
        self.device = device
        self.voxel_size = args.mapping.voxel_size
        self.max_hits = args.max_hits
        self.training=training
        # self.color_names = sorted([f"images_scale_depth/{t}" for t in os.listdir(self.path / "images_scale_depth")])
        # self.depth_names = sorted([f"depth/{t}" for t in os.listdir(self.path / "depth")])
        # Sort color frames numerically by the integer id in "rgb/<id>.<ext>".
        self.color_names = sorted([f"rgb/{t}" for t in os.listdir(self.path / "rgb")], key=lambda t: int(t[4:].split(".")[0]))
        self.depth_names = [f"depth/{t}.png" for t in range(len(self.color_names))]
        # Intrinsics: fx, fy, cx, cy, depth scale (raw depth units per meter).
        self.calib = [481.2, 480.0, 319.50, 239.50, 5000.0]
        self.image_shape = np.array([640., 480.]) # x0y1, W, H
        # Build the per-pixel uv grid from the image shape.
        uv, ratio = data_utils.get_uv(self.image_shape[1], self.image_shape[0], self.image_shape[1], self.image_shape[0])
        self.uv = uv
        self.image_shape = torch.from_numpy(self.image_shape).float().to(device)
        # 4x4 homogeneous intrinsic matrix.
        self.K = np.asarray([
            [self.calib[0], 0.0, self.calib[2],0.0],
            [0.0, self.calib[1], self.calib[3],0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0]])
        if False:  # disabled ScanNet-specific pose listing and calibration
            self.pose_names = sorted([f"pose/{t}" for t in os.listdir(self.path / "pose")])
            self.calib = [577.590698, 578.729797, 318.905426, 242.683609, 1000.0] # ScanNet fx fy cx cy depth-scale

        if first_tq is not None:
            self.first_iso = motion_util.Isometry(q=Quaternion(array=first_tq[3:]), t=np.array(first_tq[:3]))
        else:
            # Default initial orientation: pure-imaginary quaternion (a 180-degree rotation).
            self.first_iso = motion_util.Isometry(q=Quaternion(array=[0.0, -1.0, 0.0, 0.0]))

        if end_frame == -1:
            end_frame = len(self.color_names)

        self.color_names = self.color_names[start_frame:end_frame]
        self.depth_names = self.depth_names[start_frame:end_frame]

        if load_gt:
            gt_traj_path = (list(self.path.glob("*.freiburg")) + list(self.path.glob("groundtruth.txt")))[0]
            self.gt_trajectory = self._parse_traj_file(gt_traj_path)
            self.gt_trajectory = self.gt_trajectory[start_frame:end_frame]
            # Re-anchor the trajectory so its first pose equals first_iso.
            change_iso = self.first_iso.dot(self.gt_trajectory[0].inv())
            self.gt_trajectory = [change_iso.dot(t) for t in self.gt_trajectory]
            assert len(self.gt_trajectory) == len(self.color_names)
        else:
            self.gt_trajectory = None

        def save_pose(gt_trajectory, depth_names):
            # Debug helper (call is commented out below): dump per-frame pose
            # and extrinsic matrices to hard-coded paths.
            pose_path = "/data/yangchen/ICL-NUIM/Di-Fusion_demo/pose"
            ext_path = "/data/yangchen/ICL-NUIM/Di-Fusion_demo/extrinsic"
            for index, i in enumerate(gt_trajectory):
                pose = i.matrix
                extrinsic = np.linalg.inv(pose)
                name = depth_names[index].split(".")[0][6:]
                print(name)
                np.savetxt(os.path.join(pose_path, name+".txt"),pose)
                np.savetxt(os.path.join(ext_path, name+".txt"),extrinsic)
        # save_pose(self.gt_trajectory,self.depth_names)

        ## use the Scannet pose
        # self.pose = sorted(os.listdir(os.path.join(self.path, "pose")))
        # Mapping / rendering sub-modules.
        self.map = Map_for_preprocessing(self.args, device=device).to(device)
        self.tracker = system.tracker.SDFTracker(self.map, args.tracking)
        self.img_encoder = make_encoder(num_layers=3).to(self.device)
        self.rgb_processer = vis_util.get_image_to_tensor_balanced()
        self.rgb_norm = vis_util.get_nor_img()
        ## get volume renderer
        self.renderer = VolumeRenderer(self.args).to(device)
        self.step_size = args.raymarching_stepsize_ratio * self.voxel_size
        self.field = RaidanceField(self.args).to(device)
        ## get loss func
        self.criterion = SRNLossCriterion(self.args)

    def _parse_traj_file(self, traj_path):
        """Parse a trajectory file into a list of Isometry poses.

        Rows are read as [timestamp, tx, ty, tz, qx, qy, qz, qw]
        (TUM-style — translation from columns 1:4, imaginary quaternion part
        from columns 4:7, real part from the last column).
        """
        camera_ext = {}
        traj_data = np.genfromtxt(traj_path)
        # Canonical frame change: 180-degree rotation about the z axis.
        cano_quat = motion_util.Isometry(q=Quaternion(axis=[0.0, 0.0, 1.0], degrees=180.0))
        for cur_p in traj_data:
            cur_q = Quaternion(imaginary=cur_p[4:7], real=cur_p[-1]).rotation_matrix
            cur_t = cur_p[1:4]
            # Flip the y axis of both rotation and translation (coordinate-handedness change).
            cur_q[1] = -cur_q[1]
            cur_q[:, 1] = -cur_q[:, 1]
            cur_t[1] = -cur_t[1]
            cur_iso = motion_util.Isometry(q=Quaternion(matrix=cur_q), t=cur_t)
            camera_ext[cur_p[0]] = cano_quat.dot(cur_iso)
        # Timestamps are assumed to start at 1; synthesize a pose for index 0 — TODO confirm.
        camera_ext[0] = camera_ext[1]
        return [camera_ext[t] for t in range(len(camera_ext))]

    def load_pose(self, path):
        """Read a whitespace-separated matrix (default 4x4) from a text file."""
        def load_matrix_from_txt(path, shape=(4, 4)):
            with open(path) as f:
                txt = f.readlines()
            txt = ''.join(txt).replace('\n', ' ')
            matrix = [float(v) for v in txt.split()]
            return np.array(matrix).reshape(shape)
        return load_matrix_from_txt(path)

    def __len__(self):
        # Number of frames after the start/end slicing.
        return len(self.color_names)

    def __next__(self, index):
        """Load frame `index` as a FrameData (depth in meters, RGB in [0, 1]).

        NOTE(review): unlike the standard iterator protocol, this takes an
        explicit `index`; `__getitem__` below still calls it without one.
        """
        # if self.frame_id >= len(self):
        #     raise StopIteration

        depth_img_path = self.path / self.depth_names[index]
        rgb_img_path = self.path / self.color_names[index]
        # pose_path = self.path / self.pose_names[index]

        # Read raw depth and convert to meters using the depth scale.
        depth_data = cv2.imread(str(depth_img_path), cv2.IMREAD_UNCHANGED)
        depth_data = torch.from_numpy(depth_data.astype(np.float32)) / self.calib[4]
        rgb_data = cv2.imread(str(rgb_img_path))
        rgb_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2RGB) # eqs imageio.imread()[..., :3]
        rgb_data = torch.from_numpy(rgb_data).float() / 255.

        frame_data = FrameData()
        frame_data.gt_pose = self.gt_trajectory[index] if self.gt_trajectory is not None else None
        frame_data.calib = FrameIntrinsic(self.calib[0], self.calib[1], self.calib[2], self.calib[3], self.calib[4])
        frame_data.depth = depth_data.to(self.device)
        frame_data.rgb = rgb_data.to(self.device)

        # frame_data.gt_pose = self.load_pose(pose_path)

        # frame_data = {}
        # frame_data["K"] = FrameIntrinsic(self.calib[0], self.calib[1], self.calib[2], self.calib[3], self.calib[4]).to_K().astype(np.float32)
        # frame_data["depth"]=depth_data
        # frame_data["rgb"]=rgb_data
        # frame_data["gt_pose"]=self.load_pose(pose_path).astype(np.float32)
        # self.frame_id += 1
        return frame_data

    def __getitem__(self, index):
        # TODO: standard training later
        # NOTE(review): __next__ requires an `index` argument, so this call
        # raises TypeError as written; the method also always returns None.
        self.frame_id = index
        cur_frame = self.__next__()
        # voxel = self.preprocess(cur_frame)
        return None

    def precompute(self, index=None):
        """Integrate a handful of frames into the latent map, then project the
        map's voxel positions into each frame to gather image features and
        write them into the map's embedding.

        :returns: the map's encoder-state dict (voxel_vertex_idx,
                  voxel_center_xyz, voxel_pos_unique).
        """
        all_frames = range(len(self))
        indexs = np.random.choice(all_frames, self.args.num, replace=False) # (num,)
        indexs = [0,1,2,3]  # NOTE(review): debug override — the random choice above is discarded
        # ray_info = {new_list: [] for new_list in ["ray_start", "ray_dir"]}
        for index in indexs:
            cur_frame = self.__next__(index)
            # use nan to represent missing data
            cur_frame.depth[torch.logical_or(cur_frame.depth < self.args.depth_cut_min,
                                cur_frame.depth > self.args.depth_cut_max)] = np.nan
            ## transfer to camera coor
            tracker_pc, tracker_normal = self.tracker.track_camera(cur_frame.rgb, cur_frame.depth, cur_frame.calib,
                            self.first_iso if len(self.tracker.all_pd_pose) == 0 else None, for_pc=True)
            opt_depth = cur_frame.gt_pose @ tracker_pc ## eqs torch.from_numpy(cur_frame.gt_pose.matrix[:,:3]).to(self.device).float()@tracker_pc.T
            opt_normal = cur_frame.gt_pose.rotation @ tracker_normal
            ## integrate all the points
            self.map.integrate_keyframe(opt_depth, opt_normal, False, False)

        ### integrate all keyframes, update code vars: indexer, latent_vecs_pos,
        encoder_states = self.map.get_info_as_NSVF()    #   voxel_vertex_idx, voxel_center_xyz, voxel_pos_unique
        pos_feat_l = []
        valid_mask_l = []
        for index in indexs:
            cur_frame = self.__next__(index)
            # Project the unique voxel positions into this frame's image plane.
            uv = self.reprojection(encoder_states["voxel_pos_unique"], cur_frame)
            # get uv valid_index
            valid_mask = motion_util.get_valid_uv(uv, w=self.image_shape[0], h = self.image_shape[1])
            #### vis the uv:
            # test_img = np.zeros((480,640,3))
            # for i in uv:
            #     test_img = cv2.circle(test_img, i.cpu().numpy().astype(np.uint16), 1, (255, 0, 0), 1)
            # cv2.imwrite("test.jpg", test_img)

            pos_feat = self.get_corresponding_feat(uv, cur_frame.rgb)
            valid_mask_l.append(valid_mask.float())
            pos_feat_l.append(pos_feat)
        # use mean
        # pos_feat = torch.stack(pos_feat_l).mean(dim=0).squeeze() # latentsize, N
        # Element-wise mean: sum the features and divide by the per-voxel count
        # of frames in which the voxel projected inside the image (min 1).
        valid_mask = torch.clamp_min(torch.stack(valid_mask_l).sum(dim=0), 1.) 
        pos_feat = torch.stack(pos_feat_l).sum(dim=0).squeeze().permute(1,0) / valid_mask # only use valid part to mean

        ## update embedding 
        self.map.update_feat(pos_feat, encoder_states)

        return encoder_states

    def forward(self, index):
        """Render view `index`: intersect its rays with the voxel map, sample
        pixels and points, raymarch, and post-process into full-size images.

        NOTE(review): `all_results` is only assigned inside the
        `hits.sum() > 0` branch, so a view with no hits raises NameError.
        The computed `loss` is neither stored nor returned — confirm intent.
        """
        # sample = defaultdict(lambda: None)
        encoder_states = self.precompute()
        ro, rd = self.get_corresponding_rays(index)
        S = 1
        V = 1
        P, _ = rd.shape
        ray_start, ray_dir, intersection_outputs, hits = self.ray_intersect(
                torch.from_numpy(ro).to(self.device), torch.from_numpy(rd).to(self.device), encoder_states)
        # conduct pixel sampling
        tmp_uv = self.uv.reshape(2,-1)[None,None,:,:] # 1, 1, 2, P
        mask = hits.reshape(S, V, tmp_uv.shape[-1]) # SVP

        sampled_uv, sampled_masks = self.sample_pixels(tmp_uv, self.image_shape, mask=mask)
        sampled_masks = sampled_masks.reshape(S, -1).bool()
        # Keep only the sampled pixels' rays and intersection data.
        hits, sampled_masks = hits[sampled_masks].reshape(S, -1), sampled_masks.unsqueeze(-1)
        intersection_outputs = {name: outs[sampled_masks.expand_as(outs)].reshape(S, -1, outs.size(-1)) 
            for name, outs in intersection_outputs.items()}
        ray_start = ray_start[sampled_masks.expand_as(ray_start)].reshape(S, -1, 3)
        ray_dir = ray_dir[sampled_masks.expand_as(ray_dir)].reshape(S, -1, 3)

        # Per-ray segment lengths -> sampling probabilities and step counts.
        min_depth = intersection_outputs['min_depth']
        max_depth = intersection_outputs['max_depth']
        pts_idx = intersection_outputs['intersected_voxel_idx']
        dists = (max_depth - min_depth).masked_fill(pts_idx.eq(-1), 0)
        intersection_outputs['probs'] = dists / dists.sum(dim=-1, keepdim=True)
        intersection_outputs['steps'] = dists.sum(-1) / self.step_size
        # save the original rays
        ray_start0 = ray_start.reshape(-1, 3).clone() # 2048, 3
        ray_dir0 = ray_dir.reshape(-1, 3).clone()

        P = ray_dir.shape[1]// V
        # all_results = defaultdict(lambda: None)

        if hits.sum() > 0:
            intersection_outputs = {
                name: outs[hits] for name, outs in intersection_outputs.items()}
            ray_start, ray_dir = ray_start[hits], ray_dir[hits]
            encoder_states = {name: s.reshape(-1, s.size(-1)) if s is not None else None
                for name, s in encoder_states.items()}

            samples, all_results = self.raymarching(
                ray_start, ray_dir, intersection_outputs, encoder_states)

            if False:   # hierarchical (coarse-to-fine) sampling, disabled
                intersection_outputs = self.prepare_hierarchical_sampling(
                    intersection_outputs, samples, all_results)
                coarse_results = all_results.copy()

                samples, all_results = self.raymarching(
                    ray_start, ray_dir, intersection_outputs, encoder_states, fine=True)
                all_results['coarse'] = coarse_results

        hits = hits.reshape(-1)
        all_results = self.postprocessing(ray_start0, ray_dir0, all_results, hits, (S, V, P))
        if False:
            all_results['coarse'] = self.postprocessing(
                ray_start, ray_dir, all_results['coarse'], hits, (S, V, P))

        all_results['other_logs'] = self.add_other_logs(all_results)
        all_results['samples'] = {
            'sampled_uv': all_results.get('sampled_uv', sampled_uv),
            'ray_start': ray_start,
            'ray_dir': ray_dir,
            'size':self.image_shape
        }

        ## compute loss
        loss, loss_log = self.criterion.compute_loss(all_results, all_results['samples'])

        return all_results


    def preprocess(self, frame):
        """(Old, unused) Integrate one frame into the map and return the
        unique voxel-corner positions, their projected pixel coordinates and
        a validity mask."""
        ### conducting voxelization (old not used)
        frame.depth[torch.logical_or(frame.depth < self.args.depth_cut_min,
                                      frame.depth > self.args.depth_cut_max)] = np.nan
        ### note that the tracker need cuda tensor, so it is necessary to move to cuda

        tracker_pc, tracker_normal = self.tracker.track_camera(frame.rgb, frame.depth, frame.calib,
                        self.first_iso if len(self.tracker.all_pd_pose) == 0 else None, for_pc=True)  ### point in camera

        opt_depth = frame.gt_pose @ tracker_pc ## eqs torch.from_numpy(frame.gt_pose.matrix[:,:3]).to(self.device).float()@tracker_pc.T
        opt_normal = frame.gt_pose.rotation @ tracker_normal
        corner_pos = self.map.integrate_keyframe(opt_depth, opt_normal, async_optimize=False,
                                         do_optimize=False)
        corner_pos = torch.unique(corner_pos.reshape(-1,3),dim=0)
        uv = self.reprojection(corner_pos, frame)
        ### check uv
        # test_img = np.zeros((480,640,3))
        # uv = torch.clamp_min(uv,0)
        # for i in uv:
        #     test_img = cv2.circle(test_img, i.cpu().numpy().astype(np.uint16), 1, (255, 0, 0), 1)
        # cv2.imwrite("test.jpg", test_img)

        uv, valid_mask = motion_util.remove_out_uv(uv, self.image_shape[0], self.image_shape[1])
        logging.info(f"there are {valid_mask.shape} pcs is valid")
        return corner_pos, uv, valid_mask

    def reprojection(self, Pw, frame):
        """Project world points into pixel coordinates via K @ [R|t] with a
        perspective divide; the result is (N, 2) after the final transpose.
        (Input layout depends on motion_util.homo_vec — TODO confirm.)"""
        K = torch.from_numpy(frame.calib.to_K()).to(self.device).float() # 3*3
        extinsic = torch.from_numpy(frame.gt_pose.inv().matrix[:3,:]).to(self.device).float() #3*4
        Pw = motion_util.homo_vec(Pw)
        uv = K@extinsic@Pw
        uv = (uv / uv[2,:])[:2,:].T
        return uv

    def get_corresponding_feat(self, uv, image):
        '''
        get uv's corresponding feat using grid sample;
        uv: B N 2, index of pixel: from 0 to image shape;
        img: path or tensor;
        NOTE(review): the tensor branch assumes HWC layout; a CHW tensor
        would leave `rgb` unbound -- confirm callers always pass HWC.
        '''
        if isinstance(image, str):
            rgb_data = cv2.imread(str(image))
            rgb_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2RGB) # eqs imageio.imread()[..., :3]
            ## tobe NCHW
            rgb = self.rgb_processer(rgb_data).unsqueeze(0).to(self.device)
        else:
            ## image is tensor with HWC
            if image.shape[-1] == 3:
                rgb = image.permute(2,0,1)
            rgb = self.rgb_norm(rgb)
        if len(rgb.shape) < 4:
            rgb = rgb.unsqueeze(0)
        # Run the encoder (its features are cached internally), then index
        # the feature maps at the uv positions.
        self.img_encoder(rgb)
        if len(uv.shape) < 3:
            uv = uv.unsqueeze(0) # 1 N 2
        vtx_feat = self.img_encoder.index(uv, image_size= self.image_shape)
        return vtx_feat

    def get_corresponding_rays(self, index):
        '''
        this func is used for get pose corresponding ray dir
        index: int, the pose in dataset
        ray_start: camera origin
        ray_dir: all pixel's dir
        '''
        pose = self.gt_trajectory[index].matrix
        if pose.shape[-1] != 4:
            pose = data_utils.homo_pose(pose)
        ray_start, rotation = pose[:3, 3], pose[:3, :3]
        ray_dir = motion_util.get_ray_direction(ray_start, self.uv.reshape(2,-1), self.K, pose, 1)
        ro, rd = ray_start.astype(np.float32), ray_dir.transpose(1, 0).astype(np.float32)
        if False:  # sanity check: re-project ray endpoints back to pixels
            Pw = ro+rd
            uv = (self.K@np.linalg.inv(pose)@motion_util.homo_vec(Pw)).T
            new_uv = uv[:,:2] / uv[:, 2][:,None]
            print(new_uv)
        return ro, rd

    def ray_intersect(self, ray_start, ray_dir, encoder_states):
        """Intersect rays with the map's voxels (fairnr AABB test).

        :returns: ray_start/ray_dir reshaped to (S, V*P, 3); a dict with
            per-hit min/max depth and voxel indices sorted by entry depth
            (missed slots filled with MAX_DEPTH / -1); and a boolean `hits`
            mask of rays hitting at least one voxel.
        """
        point_feats = encoder_states['voxel_vertex_idx'].unsqueeze(0) # (1,N,8)
        point_xyz = encoder_states['voxel_center_xyz'].unsqueeze(0) # (1,N,3)
        S = 1
        V = 1 # TODO: later can use V not same as 1
        P, _ = ray_dir.size() # V:views, P:num_points, 
        _, H, D = point_feats.size() # num_voxels

        # ray-voxel intersection
        ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
        ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()

        if False:  # ray-voxel intersection with SVO self.use_octreez(self.use_octree)
            flatten_centers = encoder_states['voxel_octree_center_xyz']
            flatten_children = encoder_states['voxel_octree_children_idx']
            pts_idx, min_depth, max_depth = svo_ray_intersect(
                self.voxel_size, self.max_hits, flatten_centers, flatten_children,
                ray_start, ray_dir)
        else:   # ray-voxel intersection with all voxels
            pts_idx, min_depth, max_depth = aabb_ray_intersect(
                self.voxel_size, self.max_hits, point_xyz, ray_start, ray_dir) # dim=max_hits

        # sort the depths
        min_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        max_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        min_depth, sorted_idx = min_depth.sort(dim=-1)
        max_depth = max_depth.gather(-1, sorted_idx)
        pts_idx = pts_idx.gather(-1, sorted_idx)
        hits = pts_idx.ne(-1).any(-1)  # remove all points that completely miss the object

        if S > 1:  # extend the point-index to multiple shapes (just in case)
            pts_idx = (pts_idx + H * torch.arange(S, 
                device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
                ).masked_fill_(pts_idx.eq(-1), -1)

        intersection_outputs = {
            "min_depth": min_depth,
            "max_depth": max_depth,
            "intersected_voxel_idx": pts_idx
        }
        return ray_start, ray_dir, intersection_outputs, hits

    def ray_sample(self, intersection_outputs):
        """Sample point depths/distances along each ray by inverse-CDF over
        the intersected voxel segments; missed samples are pushed to
        MAX_DEPTH with zero distance."""
        # sample points and use middle point approximation
        sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
            intersection_outputs['intersected_voxel_idx'],
            intersection_outputs['min_depth'], 
            intersection_outputs['max_depth'], 
            intersection_outputs['probs'],
            intersection_outputs['steps'], 
            -1, False or (not self.training))
        sampled_dists = sampled_dists.clamp(min=0.0)
        sampled_depth.masked_fill_(sampled_idx.eq(-1), MAX_DEPTH)
        sampled_dists.masked_fill_(sampled_idx.eq(-1), 0.0)

        samples = {
            'sampled_point_depth': sampled_depth,
            'sampled_point_distance': sampled_dists,
            'sampled_point_voxel_idx': sampled_idx,
        }
        return samples

    def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, pos_feat=None, fine=False):
        """Sample points along the rays and run the volume renderer on them.

        :returns: (samples, all_results).  `fine` is currently unused.
        """
        samples = self.ray_sample(intersection_outputs)
        all_results = self.renderer.forward_chunk(pos_feat, self.field, ray_start, ray_dir, samples, encoder_states)

        # all_results['voxel_edges'] = self.encoder.get_edge(ray_start, ray_dir, samples, encoder_states)
        # Depth of the first sampled point along each ray.
        all_results['voxel_depth'] = samples['sampled_point_depth'][:, 0]
        return samples, all_results

    @torch.no_grad()
    def sample_pixels(self, uv, size, alpha=None, mask=None, **kwargs):
        """Randomly pick `args.pixel_per_view` pixels per view, weighted by
        `mask` (or `alpha > 0`), returning their uv coordinates and the
        selection mask.

        NOTE(review): `size` is image_shape = [W, H] at construction, but it
        is unpacked here as H, W — confirm the intended ordering.
        """
        H, W = int(size[0].item()), int(size[1].item())
        S, V = uv.shape[:2]

        if mask is None:
            if alpha is not None:
                mask = (alpha > 0)
            else:
                mask = uv.new_ones(S, V, uv.size(-1)).bool()
        mask = mask.float().reshape(S, V, H, W)

        # if self.args.sampling_at_center < 1.0:
        #     r = (1 - self.args.sampling_at_center) / 2.0
        #     mask0 = mask.new_zeros(S, V, H, W)
        #     mask0[:, :, int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
        #     mask = mask * mask0

        # if self.args.sampling_on_bbox:
        #     x_has_points = mask.sum(2, keepdim=True) > 0
        #     y_has_points = mask.sum(3, keepdim=True) > 0
        #     mask = (x_has_points & y_has_points).float()  

        # Sampling probabilities; sampling_on_mask == 1. means the uniform
        # fallback term below contributes nothing.
        probs = mask / (mask.sum() + 1e-8)
        sampling_on_mask = 1.
        if sampling_on_mask > 0.0:
            probs = sampling_on_mask * probs + (1 - sampling_on_mask) * 1.0 / (H * W)

        num_pixels = int(self.args.pixel_per_view)
        # patch_size, skip_size = self.args.sampling_patch_size, self.args.sampling_skipping_size
        patch_size, skip_size = 1, 1
        C = patch_size * skip_size

        if C > 1:
            probs = probs.reshape(S, V, H // C, C, W // C, C).sum(3).sum(-1)
            num_pixels = num_pixels // patch_size // patch_size

        # Gumbel-style sampling without replacement over log-probabilities.
        flatten_probs = probs.reshape(S, V, -1) 
        sampled_index = motion_util.sampling_without_replacement(torch.log(flatten_probs+ TINY), num_pixels)
        sampled_masks = torch.zeros_like(flatten_probs).scatter_(-1, sampled_index, 1).reshape(S, V, H // C, W // C)

        if C > 1:
            sampled_masks = sampled_masks[:, :, :, None, :, None].repeat(
                1, 1, 1, patch_size, 1, patch_size).reshape(S, V, H // skip_size, W // skip_size)
            if skip_size > 1:
                full_datamask = sampled_masks.new_zeros(S, V, skip_size * skip_size, H // skip_size, W // skip_size)
                full_index = torch.randint(skip_size*skip_size, (S, V))
                for i in range(S):
                    for j in range(V):
                        full_datamask[i, j, full_index[i, j]] = sampled_masks[i, j]
                sampled_masks = full_datamask.reshape(
                    S, V, skip_size, skip_size, H // skip_size, W // skip_size).permute(0, 1, 4, 2, 5, 3).reshape(S, V, H, W)
        # `uv` arrives as a numpy array here — presumably from data_utils.get_uv;
        # move it next to `size` (TODO confirm it is never already a tensor).
        uv = torch.from_numpy(uv).to(size.device) # to cuda
        X, Y = uv[:,:,0].reshape(S, V, H, W), uv[:,:,1].reshape(S, V, H, W)
        X = X[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
        Y = Y[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
        return torch.cat([X, Y], 2), sampled_masks

    def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
        """Scatter the per-hit results back into full-size (S, V, P) buffers
        and composite the background color/depth onto missed rays."""
         # we need fill_in for NSVF for background
        S, V, P = sizes
        fullsize = S * V * P

        all_results['missed'] = data_utils.fill_in((fullsize, ), hits, all_results['missed'], 1.0).view(S, V, P)
        all_results['colors'] = data_utils.fill_in((fullsize, 3), hits, all_results['colors'], 0.0).view(S, V, P, 3)
        all_results['depths'] = data_utils.fill_in((fullsize, ), hits, all_results['depths'], 0.0).view(S, V, P)

        # Blend in the background: missed fraction times bg color/depth.
        BG_DEPTH = self.field.bg_color.depth
        bg_color = self.field.bg_color(all_results['colors'])
        all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
        all_results['depths'] += all_results['missed'] * BG_DEPTH
        if 'normal' in all_results:
            all_results['normal'] = data_utils.fill_in((fullsize, 3), hits, all_results['normal'], 0.0).view(S, V, P, 3)
        if 'voxel_depth' in all_results:
            all_results['voxel_depth'] = data_utils.fill_in((fullsize, ), hits, all_results['voxel_depth'], BG_DEPTH).view(S, V, P)
        if 'voxel_edges' in all_results:
            all_results['voxel_edges'] = data_utils.fill_in((fullsize, 3), hits, all_results['voxel_edges'], 1.0).view(S, V, P, 3)
        if 'feat_n2' in all_results:
            all_results['feat_n2'] = data_utils.fill_in((fullsize,), hits, all_results['feat_n2'], 0.0).view(S, V, P)
        return all_results

    def add_other_logs(self, all_results):
        # Scalar logging values: voxel size, raymarching step size, and the
        # number of voxels currently kept in the map.
        return {'voxs_log': data_utils.item(self.voxel_size),
                'stps_log': data_utils.item(self.step_size),
                'nvox_log': data_utils.item(self.map.keep.sum().item())}

        

class Map_for_preprocessing(nn.Module):
    def __init__(self, args: argparse.Namespace, device="cpu"):
        """
        Initialize a densely indexed latent map.
        For easy manipulation, invalid indices are -1, and occupied indices are >= 0.

        :param args:   parsed configuration; reads args.mapping.voxel_size,
                       args.mapping.bound_min / bound_max and args.code_length.
        :param device: device type of the map (some operations can still run on host).
        """
        super().__init__()

        self.voxel_size = args.mapping.voxel_size
        bound_min_np = np.asarray(args.mapping.bound_min)
        # Number of cells along each axis (+1 so the upper bound is included).
        steps = ((np.asarray(args.mapping.bound_max) - bound_min_np) /
            self.voxel_size).round().astype('int64') + 1
        self.n_xyz = steps
        x, y, z = [c.reshape(-1).astype('float32') for c in np.meshgrid(np.arange(steps[0]), np.arange(steps[1]), np.arange(steps[2]))]
        points_index_xyz = torch.from_numpy(np.stack([x, y, z]).T).long().to(device)
        points_index = self._linearize_id(points_index_xyz)
        x, y, z = x * self.voxel_size + bound_min_np[0], \
                y * self.voxel_size + bound_min_np[1], \
                z * self.voxel_size + bound_min_np[2]
        fine_points = torch.from_numpy(np.stack([x, y, z]).T.astype('float32')).to(device)
        # transform from voxel centers to voxel corners (key/values)
        fine_coords, _ = motion_util.discretize_points(fine_points, self.voxel_size * .5)
        # Tight cube layout: every cube corner is shared with its neighbouring cubes.
        fine_keys0 = motion_util.offset_points(fine_coords, 1.0).reshape(-1, 3)
        fine_keys, fine_feats = torch.unique(fine_keys0, dim=0, sorted=True, return_inverse=True)  # keys: unique corner coords
        fine_feats = fine_feats.reshape(-1, 8)  # (N, 8): ids of the eight corner vertices of every voxel
        num_keys = torch.scalar_tensor(fine_keys.size(0)).long()  # number of unique corner vertices

        logging.info(f"Map size Nx = {self.n_xyz[0]}, Ny = {self.n_xyz[1]}, Nz = {self.n_xyz[2]}")
        self.device = device
        self.args = args
        self.latent_dim = self.args.code_length
        logging.info(f"in this exp we use code length of {self.latent_dim}")

        # Offsets to the 8 voxels surrounding a point (used during integration).
        self.integration_offsets = [torch.tensor(t, device=self.device, dtype=torch.float32) for t in [
            [-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5], [-0.5, 0.5, -0.5], [-0.5, 0.5, 0.5],
            [0.5, -0.5, -0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5], [0.5, 0.5, 0.5]
        ]]
        # Directly modifiable from outside.
        self.extract_mesh_std_range = None

        # 6-neighborhood used to dilate updated voxels before re-meshing.
        self.mesh_update_affected = [torch.tensor([t], device=self.device)
                                     for t in [[-1, 0, 0], [1, 0, 0],
                                               [0, -1, 0], [0, 1, 0],
                                               [0, 0, -1], [0, 0, 1]]]
        self.relative_network_offset = torch.tensor([[0.5, 0.5, 0.5]], device=self.device, dtype=torch.float32)

        # register parameters (will be saved to checkpoints)
        self.register_buffer("points", fine_points)          # voxel centers
        self.register_buffer("points_index", points_index)   # linearized ids of voxel centers
        self.register_buffer("keys", fine_keys.long())       # id used to find voxel corners/embeddings
        self.register_buffer("feats", fine_feats.long())     # for each voxel, 8 voxel corner ids
        self.register_buffer("num_keys", num_keys)           # num of unique corners
        self.register_buffer("keep", fine_feats.new_zeros(fine_feats.size(0)).long())  # occupancy flag per voxel
        # np.prod replaces the deprecated np.product (removed in NumPy 2.0).
        self.register_buffer("indexer", torch.ones(np.prod(self.n_xyz), device=device, dtype=torch.long) * -1)
        self.register_buffer("values", torch.zeros((num_keys, 256), device=device, dtype=torch.float))
        self.bound_min = fine_points[0]
        self.bound_max = fine_points[-1]

    def save(self, path):
        if not isinstance(path, Path):
            path = Path(path)
        with path.open('wb') as f:
            torch.save(self.cold_vars, f)

    def load(self, path):
        if not isinstance(path, Path):
            path = Path(path)
        with path.open('rb') as f:
            self.cold_vars = torch.load(f)

    def _get_var(self, name):
        if threading.get_ident() == self.meshing_thread_id and name in self.backup_var_names:
            return self.backup_vars[name]
        else:
            return self.cold_vars[name]

    def _set_var(self, value, name):
        if threading.get_ident() == self.meshing_thread_id and name in self.backup_var_names:
            self.backup_vars[name] = value
        else:
            self.cold_vars[name] = value

    def _inflate_latent_buffer(self, count: int):
        '''
        count: int, new registered voxels num 
        return: new_inds: torch list,from known points to new ones
        '''
        target_n_occupied = self.keep.sum() + count
        # if self.latent_vecs.size(0) < target_n_occupied:
        #     new_size = self.latent_vecs.size(0) # double the size of self.latent_vecs to encode feat
        #     while new_size < target_n_occupied:
        #         new_size *= 2
        #     new_vec = torch.empty((new_size, self.latent_dim), dtype=torch.float32, device=self.device)
        #     new_vec[:self.latent_vecs.size(0)] = self.latent_vecs
        #     new_vec_pos = torch.ones((new_size, ), dtype=torch.long, device=self.device) * -1 # 2048
        #     new_vec_pos[:self.latent_vecs.size(0)] = self.latent_vecs_pos
        #     new_voxel_conf = torch.zeros((new_size, ), dtype=torch.float32, device=self.device) # 2048
        #     new_voxel_conf[:self.latent_vecs.size(0)] = self.voxel_obs_count
        #     new_voxel_optim = torch.zeros((new_size, ), dtype=torch.bool, device=self.device)
        #     new_voxel_optim[:self.latent_vecs.size(0)] = self.voxel_optimized
        #     new_vec[self.latent_vecs.size(0):].zero_()
        #     self.latent_vecs = new_vec
        #     self.latent_vecs_pos = new_vec_pos
        #     self.voxel_obs_count = new_voxel_conf
        #     self.voxel_optimized = new_voxel_optim

        new_inds = torch.arange(self.keep.sum(), target_n_occupied, device=self.device, dtype=torch.long)
        # self.n_occupied = target_n_occupied
        return new_inds

    def _linearize_id(self, xyz: torch.Tensor):
        """
        :param xyz (N, 3) long id
        :return: (N, ) lineraized id to be accessed in self.indexer
        """
        return xyz[:, 2] + self.n_xyz[-1] * xyz[:, 1] + (self.n_xyz[-1] * self.n_xyz[-2]) * xyz[:, 0]

    def _unlinearize_id(self, idx: torch.Tensor):
        """
        :param idx: (N, ) linearized id for access in self.indexer
        :return: xyz (N, 3) id to be indexed in 3D
        """
        return torch.stack([idx // (self.n_xyz[1] * self.n_xyz[2]),
                            (idx // self.n_xyz[2]) % self.n_xyz[1],
                            idx % self.n_xyz[2]], dim=-1)

    def _mark_updated_vec_id(self, new_vec_id: torch.Tensor):
        """
        :param new_vec_id: (B,) updated id (indexed in latent vectors)
        """
        self.mesh_cache.updated_vec_id = torch.cat([self.mesh_cache.updated_vec_id, new_vec_id])
        self.mesh_cache.updated_vec_id = torch.unique(self.mesh_cache.updated_vec_id)

    def allocate_block(self, idx: torch.Tensor):
        """
        :param idx: (N, 3) or (N, ), if the first one, will call linearize id.
        NOTE: this will not check index overflow!
        """
        if idx.ndimension() == 2 and idx.size(1) == 3:
            idx = self._linearize_id(idx)
        new_id = self._inflate_latent_buffer(idx.size(0))
        # self.latent_vecs_pos[new_id] = idx # use latent_pos to store idx, from 0-new ind, idx: flattened 

        self.indexer[idx] = new_id # indexer login flattened index # idx:10000,20000, NOTE:this func is may cause error.

        # update the keep to register the occupy status.
        # tmp = []
        for i in idx:
            index = self.points_index.eq(i).nonzero()[0]
            # tmp.append(index)
            self.keep[index] = 1
        # tmp = torch.stack(tmp)
        # return tmp
        
    def update_feat(self, pose_feat, encoder_states):
        '''
        we use this func to update the self.value
        '''
        self.values[encoder_states["unique_feat_idx"]] = pose_feat # N 256
        encoder_states["voxel_vertex_emb"] = self.values

    def _update_optimize_result_set(self, deintegrate_old: bool):
        """
        Fold an asynchronous optimization result back into the latent vectors.

        :param deintegrate_old: if False, simply overwrite the optimized latents;
            if True, blend the optimization delta into the current latents,
            re-weighting by the observation count each voxel had when the
            optimization started versus its count now.
        """
        idx = self.optimize_result_set.latent_ids
        assert idx is not None
        if not deintegrate_old:
            self.latent_vecs[idx] = self.optimize_result_set.new_latent_vecs
        else:
            # cur_count may have grown while the optimizer ran (new frames were
            # integrated meanwhile), so the optimization delta is down-weighted
            # by original_count / cur_count.
            cur_count = self.voxel_obs_count[idx].unsqueeze(-1)
            original_count = self.optimize_result_set.old_latent_obs_counts.unsqueeze(-1)
            delta_vecs_sum = self.latent_vecs[idx] * cur_count + (self.optimize_result_set.new_latent_vecs -
                                                                  self.optimize_result_set.old_latent_vecs) * original_count
            self.latent_vecs[idx] = delta_vecs_sum / cur_count

        self._mark_updated_vec_id(idx)
        self.voxel_optimized[idx] = True
        self.optimize_result_set.clear()     # trigger gc...

    # Bit flags for packing several boolean conditions into one integer status
    # per voxel (see the commented-out map_status logic in integrate_keyframe).
    STATUS_CONF_BIT = 1 << 0    # 1 conf: confidence
    STATUS_SURF_BIT = 1 << 1    # 2 surf: surface

    def _make_mesh_from_cache(self):
        """
        Build an Open3D TriangleMesh from the cached vertex buffer.

        Vertices are stored flat as consecutive triangles (3 vertices per face);
        per-vertex std values are mapped to colors through the jet colormap.
        :return: o3d.geometry.TriangleMesh
        """
        vertices = self.mesh_cache.vertices.reshape((-1, 3))
        triangles = np.arange(vertices.shape[0]).reshape((-1, 3))

        final_mesh = o3d.geometry.TriangleMesh()
        # The pre-conversion is saving tons of time
        final_mesh.vertices = o3d.utility.Vector3dVector(vertices.astype(float))
        final_mesh.triangles = o3d.utility.Vector3iVector(triangles.astype(np.int32))

        # Assign color:
        if vertices.shape[0] > 0:
            import matplotlib.cm
            vert_color = self.mesh_cache.vertices_std.reshape((-1, )).astype(float)
            if self.extract_mesh_std_range is not None:
                vcolor_min, vcolor_max = self.extract_mesh_std_range
                vert_color = np.clip(vert_color, vcolor_min, vcolor_max)
            else:
                vcolor_min, vcolor_max = vert_color.min(), vert_color.max()
            # Guard against a zero range (uniform std or a single vertex), which
            # previously caused a division by zero here.
            denom = vcolor_max - vcolor_min
            if denom > 0:
                vert_color = (vert_color - vcolor_min) / denom
            else:
                vert_color = np.zeros_like(vert_color)
            vert_color = matplotlib.cm.jet(vert_color)[:, :3]
            final_mesh.vertex_colors = o3d.utility.Vector3dVector(vert_color)

        return final_mesh

    def _expand_flatten_id(self, base_flatten_id: torch.Tensor, ensure_valid: bool = True):
        ### use the func to dilate the voxel. 
        expanded_flatten_id = [base_flatten_id]
        updated_pos = self._unlinearize_id(base_flatten_id) # pos -> position
        for affected_offset in self.mesh_update_affected:
            rs_id = updated_pos + affected_offset
            for dim in range(3):
                rs_id[:, dim].clamp_(0, self.n_xyz[dim] - 1)
            rs_id = self._linearize_id(rs_id)
            if ensure_valid:
                rs_id = rs_id[self.indexer[rs_id] != -1]
            expanded_flatten_id.append(rs_id)
        expanded_flatten_id = torch.unique(torch.cat(expanded_flatten_id))
        return expanded_flatten_id

    def get_sdf(self, xyz: torch.Tensor):
        """
        Get the sdf value of the requested positions with computation graph built.
        :param xyz: (N, 3) world-space query positions.
        :return: sdf: (M,), std (M,), valid_mask: (N,) with M elements being 1.
        """
        xyz_normalized = (xyz - self.bound_min.unsqueeze(0)) / self.voxel_size
        with torch.no_grad():
            # Cell containing each query point (ceil - 1 matches the binning used
            # in integrate_keyframe).
            grid_id = torch.ceil(xyz_normalized.detach()).long() - 1
            sample_latent_id = self.indexer[self._linearize_id(grid_id)]
            sample_valid_mask = sample_latent_id != -1
            # Prune validity by ignore-count.
            valid_valid_mask = self.voxel_obs_count[sample_latent_id[sample_valid_mask]] > self.args.ignore_count_th
            # Write the refined mask back into the valid slots of the outer mask.
            sample_valid_mask[sample_valid_mask.clone()] = valid_valid_mask
            valid_latent = self.latent_vecs[sample_latent_id[sample_valid_mask]]

        # Coordinates relative to the voxel center (offset [0.5,0.5,0.5] puts them
        # roughly in [-0.5, 0.5)); gradients flow through this expression only.
        valid_xyz_rel = xyz_normalized[sample_valid_mask] - grid_id[sample_valid_mask] - self.relative_network_offset

        sdf, std = net_util.forward_model(self.model.decoder,
                                        latent_input=valid_latent, xyz_input=valid_xyz_rel, no_detach=True)
        return sdf.squeeze(-1), std.squeeze(-1), sample_valid_mask

    def extract_mesh(self, voxel_resolution: int, max_n_triangles: int, fast: bool = True,
                     max_std: float = 2000.0, extract_async: bool = False, no_cache: bool = False,
                     interpolate: bool = True):
        """
        Extract mesh using marching cubes.
        :param voxel_resolution: int, number of sub-blocks within an LIF block.
        :param max_n_triangles: int, maximum number of triangles.
        :param fast: whether to hierarchically extract sdf for speed improvement.
        :param max_std: std threshold passed to the marching-cubes kernel.
        :param interpolate: whether to interpolate sdf values.
        :param extract_async: if set to True, the function will only return a mesh when
                1) There is a change in the map.
                2) The request is completed.
                otherwise, it will just return None.
        :param no_cache: ignore cached mesh and restart over.
        :return: Open3D mesh.
        """
        # A previous async meshing job may still be running: harvest its result if
        # finished, wait for it in sync mode, or report "not ready" in async mode.
        if self.meshing_thread is not None:
            if not self.meshing_thread.is_alive():
                self.meshing_thread = None
                self.meshing_thread_id = -1
                self.backup_vars = {}
                return self._make_mesh_from_cache()
            elif not extract_async:
                self.meshing_thread.join()
                return self._make_mesh_from_cache()
            else:
                return None

        with self.modifying_lock:
            if self.mesh_cache.updated_vec_id.size(0) == 0 and not no_cache:
                return self._make_mesh_from_cache() if not extract_async else None
            else:
                # We can start meshing, Yay!
                if no_cache:
                    updated_vec_id = torch.arange(self.n_occupied, device=self.device)
                    self.mesh_cache.clear_all()
                else:
                    updated_vec_id = self.mesh_cache.updated_vec_id
                    self.mesh_cache.clear_updated_vec()
                if extract_async:
                    # Snapshot mutable state so the mapping thread can keep integrating
                    # while the meshing thread reads a consistent copy.
                    for b_name in self.backup_var_names:
                        self.backup_vars[b_name] = self.cold_vars[b_name]

        def do_meshing(voxel_resolution):
            # Runs either inline or on a worker thread with its own CUDA stream.
            torch.cuda.synchronize()
            with torch.cuda.stream(self.meshing_stream):
                # Voxels touched since last meshing, dilated by their 6-neighborhood.
                focused_flatten_id = self.latent_vecs_pos[updated_vec_id]
                occupied_flatten_id = self._expand_flatten_id(focused_flatten_id)
                occupied_vec_id = self.indexer[occupied_flatten_id]  # (B, )
                # Remove voxels with too low confidence.
                occupied_vec_id = occupied_vec_id[self.voxel_obs_count[occupied_vec_id] > self.args.ignore_count_th]

                # Maps a latent-vector id to its row in the batched sdf volumes below.
                vec_id_batch_mapping = torch.ones((occupied_vec_id.max().item() + 1,), device=self.device, dtype=torch.int) * -1
                vec_id_batch_mapping[occupied_vec_id] = torch.arange(0, occupied_vec_id.size(0), device=self.device,
                                                                     dtype=torch.int)
                occupied_latent_vecs = self.latent_vecs[occupied_vec_id]  # (B, 125)
                B = occupied_latent_vecs.size(0)

                # Sample more data.
                sample_a = -(voxel_resolution // 2) * (1. / voxel_resolution)
                sample_b = 1. + (voxel_resolution - 1) // 2 * (1. / voxel_resolution)
                voxel_resolution *= 2

                low_resolution = voxel_resolution // 2 if fast else voxel_resolution
                low_samples = net_util.get_samples(low_resolution, self.device, a=sample_a, b=sample_b) - \
                              self.relative_network_offset # (l**3, 3)
                low_samples = low_samples.unsqueeze(0).repeat(B, 1, 1)  # (B, l**3, 3)
                low_latents = occupied_latent_vecs.unsqueeze(1).repeat(1, low_samples.size(1), 1)  # (B, l**3, 3)

                with torch.no_grad():
                    low_sdf, low_std = net_util.forward_model(self.model.decoder,
                                                        latent_input=low_latents.view(-1, low_latents.size(-1)),
                                                        xyz_input=low_samples.view(-1, low_samples.size(-1)))

                if fast:
                    # Coarse pass at half resolution, upsampled; only samples near the
                    # zero crossing (|sdf| < 0.05) are re-evaluated at full resolution.
                    low_sdf = low_sdf.reshape(B, 1, low_resolution, low_resolution, low_resolution)  # (B, 1, l, l, l)
                    low_std = low_std.reshape(B, 1, low_resolution, low_resolution, low_resolution)
                    high_sdf = torch.nn.functional.interpolate(low_sdf, mode='trilinear',
                                                               size=(voxel_resolution, voxel_resolution, voxel_resolution),
                                                               align_corners=True)
                    high_std = torch.nn.functional.interpolate(low_std, mode='trilinear',
                                                               size=(voxel_resolution, voxel_resolution, voxel_resolution),
                                                               align_corners=True)
                    high_sdf = high_sdf.squeeze(0).reshape(B, voxel_resolution ** 3)  # (B, H**3)
                    high_std = high_std.squeeze(0).reshape(B, voxel_resolution ** 3)

                    high_valid_lifs, high_valid_sbs = torch.where(high_sdf.abs() < 0.05)
                    if high_valid_lifs.size(0) > 0:
                        high_samples = net_util.get_samples(voxel_resolution, self.device, a=sample_a, b=sample_b) - \
                                       self.relative_network_offset  # (H**3, 3)
                        high_latents = occupied_latent_vecs[high_valid_lifs]  # (VH, 125)
                        high_samples = high_samples[high_valid_sbs]  # (VH, 3)

                        with torch.no_grad():
                            high_valid_sdf, high_valid_std = net_util.forward_model(self.model.decoder,
                                                                       latent_input=high_latents,
                                                                       xyz_input=high_samples)
                        high_sdf[high_valid_lifs, high_valid_sbs] = high_valid_sdf.squeeze(-1)
                        high_std[high_valid_lifs, high_valid_sbs] = high_valid_std.squeeze(-1)

                    high_sdf = high_sdf.reshape(B, voxel_resolution, voxel_resolution, voxel_resolution)
                    high_std = high_std.reshape(B, voxel_resolution, voxel_resolution, voxel_resolution)
                else:
                    high_sdf = low_sdf.reshape(B, low_resolution, low_resolution, low_resolution)
                    high_std = low_std.reshape(B, low_resolution, low_resolution, low_resolution)

                # Flip sign convention for the marching-cubes kernel.
                high_sdf = -high_sdf
                if interpolate:
                    vertices, vertices_flatten_id, vertices_std = system.ext.marching_cubes_interp(
                        self.indexer.view(self.n_xyz), focused_flatten_id, vec_id_batch_mapping,
                        high_sdf, high_std, max_n_triangles, self.n_xyz, max_std)  # (T, 3, 3), (T, ), (T, 3)
                else:
                    vertices, vertices_flatten_id = system.ext.marching_cubes(
                        self.indexer.view(self.n_xyz), focused_flatten_id, vec_id_batch_mapping,
                        high_sdf, max_n_triangles, self.n_xyz)  # (T, 3, 3), (T, ), (T, 3)
                    vertices_std = torch.zeros((vertices.size(0), 3), dtype=torch.float32, device=vertices.device)

                # Grid coordinates -> world coordinates.
                vertices = vertices * self.voxel_size + self.bound_min
                vertices = vertices.cpu().numpy()
                vertices_std = vertices_std.cpu().numpy()
                # Remove relevant cached vertices and append updated/new ones.
                vertices_flatten_id = vertices_flatten_id.cpu().numpy()
                if self.mesh_cache.vertices is None:
                    self.mesh_cache.vertices = vertices
                    self.mesh_cache.vertices_flatten_id = vertices_flatten_id
                    self.mesh_cache.vertices_std = vertices_std
                else:
                    p = np.sort(np.unique(vertices_flatten_id))
                    valid_verts_idx = _get_valid_idx(self.mesh_cache.vertices_flatten_id, p)
                    self.mesh_cache.vertices = np.concatenate([self.mesh_cache.vertices[valid_verts_idx], vertices], axis=0)
                    self.mesh_cache.vertices_flatten_id = np.concatenate([
                        self.mesh_cache.vertices_flatten_id[valid_verts_idx], vertices_flatten_id
                    ], axis=0)
                    self.mesh_cache.vertices_std = np.concatenate([self.mesh_cache.vertices_std[valid_verts_idx], vertices_std], axis=0)

        if extract_async:
            self.meshing_thread = threading.Thread(target=do_meshing, args=(voxel_resolution, ))
            # NOTE(review): .ident is None until start(); this id is assigned before
            # the thread starts — confirm _get_var routing relies on it only afterwards.
            self.meshing_thread_id = self.meshing_thread.ident
            self.meshing_thread.daemon = True
            self.meshing_thread.start()
        else:
            do_meshing(voxel_resolution)
            return self._make_mesh_from_cache()

    def get_total_voxel_corner(self):
        occupied_flatten_id = torch.where(self.indexer != -1)[0]  # (B, )
        blk_verts = [self._unlinearize_id(occupied_flatten_id) * self.voxel_size + self.bound_min]
        half_voxel = self.voxel_size*0.5
        # for vert_offset in [[0.0, 0.0, self.voxel_size], [0.0, self.voxel_size, 0.0],
        #                     [0.0, self.voxel_size, self.voxel_size], [self.voxel_size, 0.0, 0.0],
        #                     [self.voxel_size, 0.0, self.voxel_size], [self.voxel_size, self.voxel_size, 0.0],
        #                     [self.voxel_size, self.voxel_size, self.voxel_size]]:
        #     blk_verts.append(
        #         blk_verts[0] + torch.tensor(vert_offset, dtype=torch.float32, device=blk_verts[0].device).unsqueeze(0)
        #     )
        for vert_offset in [[-1., -1., -1.],
                            [-1., -1.,  1.],
                            [-1.,  1., -1.],
                            [-1.,  1.,  1.],
                            [ 1., -1., -1.],
                            [ 1., -1.,  1.],
                            [ 1.,  1., -1.],
                            [ 1.,  1.,  1.]]:
            blk_verts.append(
                blk_verts[0] + half_voxel*torch.tensor(vert_offset, dtype=torch.float32, device=blk_verts[0].device).unsqueeze(0)
            )
        blk_verts = torch.cat(blk_verts, dim=0).float()
        return blk_verts, occupied_flatten_id

    def get_info_as_NSVF(self):
        '''
        Usage: use this func to get voxel info
        NOTE: the voxel defination is same as NSVF
        '''
        # occupied_flatten_id = torch.where(self.indexer != -1)[0]  # (B, )
        points = self.points[self.keep.bool()]
        # points[:, 0] += (self.voxel_size / 10) # origin NSVF use this bia to aviod encounter -1
        feats  = self.feats[self.keep.bool()]
        ### the defination is different and use naive method instead!
        # use the feats to judge the corner positon
        unique_feat_idx, _ = torch.unique(feats.flatten(), sorted=True, return_inverse=True)
        unique_cor_idx = self.keys[unique_feat_idx]

        # all_corners = self.keys[feats.flatten()]*self.voxel_size*0.5 + self.bound_min # transfer to pos
        # all_corners = torch.unique(all_corners, dim=0)
        voxel_pos_unique = unique_cor_idx*self.voxel_size*0.5+self.bound_min # this is updated version

        encoder_states = {
            'voxel_vertex_idx': feats, # (K, 8)
            'voxel_center_xyz': points, # center of voxel, (K,1)
            'voxel_pos_unique': voxel_pos_unique, # N, 3, position of voxel corner
            'unique_feat_idx': unique_feat_idx # use for index self.keys & self.values
        }
        return encoder_states

    def integrate_keyframe(self, surface_xyz: torch.Tensor, surface_normal: torch.Tensor, do_optimize: bool = False, async_optimize: bool = False):
        """
        :param surface_xyz:  (N, 3) x, y, z
        :param surface_normal: (N, 3) nx, ny, nz
        :param do_optimize: whether to do optimization (this will be slow though)
        :param async_optimize: whether to spawn a separate job to optimize.
            Note: the optimization is based on the point at this function call.
                  optimized result will be updated on the next function call after it's ready.
            Caveat: If two optimization thread are started simultaneously, results may not be accurate.
                    Although we explicitly ban this, user can also trigger this by call the function with async_optimize = True+False.
                    Please use consistent `async_optimize` during a SLAM session.
        :return:
        """
        assert surface_xyz.device == surface_normal.device == self.device, \
            f"Device of map {self.device} and input observation " \
            f"{surface_xyz.device, surface_normal.device} must be the same."

        # -- 1. Allocate new voxels --
        # Bin every surface point into its containing voxel (same ceil-1 scheme as get_sdf).
        surface_xyz_zeroed = surface_xyz - self.bound_min.unsqueeze(0)
        surface_xyz_normalized = surface_xyz_zeroed / self.voxel_size
        surface_grid_id = torch.ceil(surface_xyz_normalized).long() - 1 # here compute all voxels 
        surface_grid_id = self._linearize_id(surface_grid_id) # 65931

        # Remove the observations where it is too sparse.
        unq_mask = None
        if self.args.mapping.prune_min_vox_obs > 0:
            '''
            torch.unique: return, list, inverse_indices, count
            inverse_indices: representing the indices for where elements in the original input map to in the output; 
            otherwise, this function will only return a single tensor.
            '''
            # Keep only points whose voxel received more than prune_min_vox_obs hits.
            _, unq_inv, unq_count = torch.unique(surface_grid_id, return_counts=True, return_inverse=True) # 2471 valid voxels
            unq_mask = (unq_count > self.args.mapping.prune_min_vox_obs)[unq_inv]
            surface_xyz_normalized = surface_xyz_normalized[unq_mask]
            surface_grid_id = surface_grid_id[unq_mask]
            surface_normal = surface_normal[unq_mask]

        # Identify empty cells, fill the indexer.
        invalid_surface_ind = self.indexer[surface_grid_id] == -1 # find points not registered
        if invalid_surface_ind.sum() > 0:
            ## this means some points is not registered in self.indexer
            invalid_flatten_id = torch.unique(surface_grid_id[invalid_surface_ind])
            #TODO: We expand this because we want to create some dummy voxels which helps the mesh extraction.
            invalid_flatten_id = self._expand_flatten_id(invalid_flatten_id, ensure_valid=True)
            invalid_flatten_id = invalid_flatten_id[self.indexer[invalid_flatten_id] == -1] ## new located indexer
            self.allocate_block(invalid_flatten_id)
        # # TODO: maybe use this func
        # def get_pruned_surface(enabled=True, lin_pos=None):
        #     # Prune useless surface points for quicker gathering (set to True to enable)
        #     # maintain one voxel one point.
        #     if enabled:
        #         encoder_voxel_pos_exp = self._expand_flatten_id(lin_pos, False)
        #         # encoder_voxel_pos_exp = lin_pos
        #         exp_indexer = torch.zeros_like(self.indexer)
        #         exp_indexer[encoder_voxel_pos_exp] = 1
        #         focus_mask = exp_indexer[surface_grid_id] == 1
        #         return surface_xyz_normalized[focus_mask], surface_normal[focus_mask]
        #     else:
        #         return surface_xyz_normalized, surface_normal

        # # -- 2. Get all voxels whose confidence is lower than optimization threshold and encoder them --
        # # Find my voxels conditions:
        # #   1) Voxel confidence < Threshold
        # #   2) Voxel is valid.
        # #   3) Not optimized.
        # #   4) There is surface points in the [-0.5 - 0.5] range of this voxel.
        # map_status = torch.zeros(np.product(self.n_xyz), device=self.device, dtype=torch.short) # num of all voxels

        # encoder_voxel_pos = self.latent_vecs_pos[torch.logical_and(self.voxel_obs_count < self.args.mapping.encoder_count_th,
        #                                                            self.latent_vecs_pos >= 0)]
        # map_status[encoder_voxel_pos] |= self.STATUS_CONF_BIT

        # if encoder_voxel_pos.size(0) > 0:
        #     pruned_surface_xyz_normalized, pruned_surface_normal = get_pruned_surface(
        #         enabled=True, lin_pos=encoder_voxel_pos)

    def ray_intersect(self, ray_start, ray_dir, encoder_states):
        """
        Intersect camera rays against the currently kept voxels (NSVF style).

        :param ray_start: ray origins, broadcastable to ray_dir's shape.
        :param ray_dir: (S, V, P, 3) ray directions.
        :param encoder_states: dict from get_info_as_NSVF (corner ids / voxel centers).
        :return: (ray_start, ray_dir) flattened to (S, V*P, 3), a dict with sorted
                 per-ray entry/exit depths and intersected voxel indices (-1 padded),
                 and a (S, V*P) mask of rays that hit at least one voxel.
        """
        point_feats = encoder_states['voxel_vertex_idx'] 
        point_xyz = encoder_states['voxel_center_xyz']
        S, V, P, _ = ray_dir.size()
        # _, H, D = point_feats.size()

        # ray-voxel intersection
        ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
        ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()

        if False:  # ray-voxel intersection with SVO # TODO: maybe should consider SVO
            flatten_centers = encoder_states['voxel_octree_center_xyz']
            flatten_children = encoder_states['voxel_octree_children_idx']
            pts_idx, min_depth, max_depth = svo_ray_intersect(
                self.voxel_size, self.max_hits, flatten_centers, flatten_children,
                ray_start, ray_dir)
        else:   # ray-voxel intersection with all voxels
            pts_idx, min_depth, max_depth = aabb_ray_intersect(
                self.voxel_size, self.max_hits, point_xyz, ray_start, ray_dir)

        # sort the depths; missed slots are pushed to the end via MAX_DEPTH
        min_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        max_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
        min_depth, sorted_idx = min_depth.sort(dim=-1)
        max_depth = max_depth.gather(-1, sorted_idx)
        pts_idx = pts_idx.gather(-1, sorted_idx)
        hits = pts_idx.ne(-1).any(-1)  # remove all points that completely miss the object
        
        # if S > 1:  # extend the point-index to multiple shapes (just in case)
        #     pts_idx = (pts_idx + H * torch.arange(S, 
        #         device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
        #         ).masked_fill_(pts_idx.eq(-1), -1)

        intersection_outputs = {
            "min_depth": min_depth,
            "max_depth": max_depth,
            "intersected_voxel_idx": pts_idx
        }
        return ray_start, ray_dir, intersection_outputs, hits

    def get_fast_preview_visuals(self):
        """
        Build a cheap wireframe visualization of all allocated voxels.

        :return: [voxel wireframe LineSet, map-bound wireframe] (Open3D geometries).
        """
        occupied_flatten_id = torch.where(self.indexer != -1)[0]  # (B, )
        # First group: one base vertex per occupied voxel; the 7 offsets below add
        # the remaining cube corners. Corner order matters for the edge table.
        blk_verts = [self._unlinearize_id(occupied_flatten_id) * self.voxel_size + self.bound_min]
        n_block = blk_verts[0].size(0)
        blk_edges = []
        for vert_offset in [[0.0, 0.0, self.voxel_size], [0.0, self.voxel_size, 0.0],
                            [0.0, self.voxel_size, self.voxel_size], [self.voxel_size, 0.0, 0.0],
                            [self.voxel_size, 0.0, self.voxel_size], [self.voxel_size, self.voxel_size, 0.0],
                            [self.voxel_size, self.voxel_size, self.voxel_size]]:
            blk_verts.append(
                blk_verts[0] + torch.tensor(vert_offset, dtype=torch.float32, device=blk_verts[0].device).unsqueeze(0)
            )
        # The 12 cube edges, expressed as pairs of corner-group indices; group k of
        # a voxel lives at row (index_in_group + k * n_block) in the stacked buffer.
        for vert_edge in [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]:
            blk_edges.append(np.stack([np.arange(n_block, dtype=np.int32) + vert_edge[0] * n_block,
                                       np.arange(n_block, dtype=np.int32) + vert_edge[1] * n_block], axis=1))
        blk_verts = torch.cat(blk_verts, dim=0).cpu().numpy().astype(float)
        blk_wireframe = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(blk_verts),
            lines=o3d.utility.Vector2iVector(np.concatenate(blk_edges, axis=0)))
        from utils import vis_util
        return [
            blk_wireframe,
            vis_util.wireframe_bbox(self.bound_min.cpu().numpy(),
                                    self.bound_max.cpu().numpy(), color_id=4)
        ]

    def get_map_visuals(self, return_blocks: bool = False, return_samples: bool = False, return_uncertainty: bool = False,
                        return_mesh: bool = False,
                        sample_range: list = None, voxel_resolution: int = 8, include_bound: bool = False):
        """
        Assemble a MapVisuals bundle of open3d geometries for debugging/visualization.

        :param return_blocks: whether to include allocated-voxel wireframes (plus the map bound bbox).
        :param return_samples: whether to include sdf samples (at voxel resolution)
            Note: only for debugging purpose, can be removed in the future
        :param return_uncertainty: whether to include a point cloud colored by decoder uncertainty.
        :param return_mesh: whether to extract mesh.
        :param sample_range: [low-sdf, high-sdf] used to normalize sample/uncertainty colors;
            when None, values are min-max normalized per call instead.
        :param voxel_resolution: int, number of sub-blocks within an LIF block.
        :param include_bound: bool. whether to return the map bound when return_blocks
            (NOTE(review): this flag is not read anywhere in this body — confirm intent.)
        :return: MapVisuals with only the requested fields populated.
        """
        from utils import vis_util

        map_visuals = MapVisuals()

        if return_blocks:
            # Every allocated block (indexer != -1), converted back to world-space lower corners.
            occupied_flatten_id = torch.where(self.indexer != -1)[0]  # (B, )
            blk_xyz = self._unlinearize_id(occupied_flatten_id)
            blk_start = blk_xyz * self.voxel_size + self.bound_min
            blk_start = blk_start.cpu().numpy()
            occupied_flatten_id = occupied_flatten_id.cpu().numpy()

            blk_wireframes = []
            blk_dim = np.asarray([self.voxel_size, self.voxel_size, self.voxel_size])
            for blk_start_i, ofid in zip(blk_start, occupied_flatten_id):
                # Blocks explicitly marked for debugging are drawn solid and highlighted.
                if ofid in self.debug_show_blocks:
                    blk_wireframes.append(vis_util.wireframe_bbox(blk_start_i, blk_start_i + blk_dim,
                                                                  solid=True, color_id=4))
                else:
                    blk_wireframes.append(vis_util.wireframe_bbox(blk_start_i, blk_start_i + blk_dim, solid=False))

            # Always append the overall map bound as a highlighted wireframe.
            blk_wireframes.append(vis_util.wireframe_bbox(self.bound_min.cpu().numpy(),
                                                          self.bound_max.cpu().numpy(), color_id=4))

            map_visuals.blocks = vis_util.merged_entities(blk_wireframes)

        if return_mesh:
            map_visuals.mesh = [self.extract_mesh(voxel_resolution, int(1e7), extract_async=False)]

        if return_samples or return_uncertainty:
            # Select latent vectors of allocated blocks with enough observations.
            # NOTE(review): threshold is read from self.args.ignore_count_th (not args.mapping.*
            # as elsewhere in this file) — confirm the config layout.
            occupied_flatten_id = torch.where(self.indexer != -1)[0]  # (B, )
            occupied_vec_id = self.indexer[occupied_flatten_id]  # (B, )
            occupied_vec_id = occupied_vec_id[self.voxel_obs_count[occupied_vec_id] > self.args.ignore_count_th]
            occupied_latent_vecs = self.latent_vecs[occupied_vec_id]  # (B, 125)
            B = occupied_latent_vecs.size(0)

            # Dense sub-voxel sample grid, shifted into the decoder's local frame.
            high_samples = net_util.get_samples(voxel_resolution, self.device) - self.relative_network_offset # (H**3, 3)
            high_samples = high_samples.unsqueeze(0).repeat(B, 1, 1)  # (B, H**3, 3)
            high_latents = occupied_latent_vecs.unsqueeze(1).repeat(1, high_samples.size(1), 1)  # (B, H**3, 125)

            # Decode sdf + uncertainty for every (latent, sample) pair; no gradients needed.
            with torch.no_grad():
                high_sdf, high_uncertainty = net_util.forward_model(self.model.decoder,
                                                  latent_input=high_latents.view(-1, high_latents.size(-1)),
                                                  xyz_input=high_samples.view(-1, high_samples.size(-1)))

            high_sdf = high_sdf.reshape(B, voxel_resolution, voxel_resolution, voxel_resolution)
            high_uncertainty = high_uncertainty.reshape(B, voxel_resolution, voxel_resolution, voxel_resolution)

            # Map samples back to world coordinates (undo the local-frame offset above).
            vis_grid_base = self._unlinearize_id(self.latent_vecs_pos[occupied_vec_id])
            vis_sample_pos = high_samples + vis_grid_base.unsqueeze(1).repeat(1, high_samples.size(1), 1) + self.relative_network_offset
            vis_sample_pos = (vis_sample_pos.reshape(-1, 3) * self.voxel_size + self.bound_min).cpu().numpy()
            high_sdf = high_sdf.reshape(-1).cpu().numpy()
            high_uncertainty = high_uncertainty.reshape(-1).cpu().numpy()

            if sample_range is None:
                # Min-max normalization; NOTE(review): divides by zero if the field is constant.
                vis_high_sdf = (high_sdf - high_sdf.min()) / (high_sdf.max() - high_sdf.min())
                vis_std = (high_uncertainty - high_uncertainty.min()) / (high_uncertainty.max() - high_uncertainty.min())
                print(f"Uncertainty normalized to {high_uncertainty.min().item()} ~ {high_uncertainty.max().item()}")
            else:
                # Fixed-range normalization, clipped to [0, 1].
                vis_high_sdf = (high_sdf - sample_range[0]) / (sample_range[1] - sample_range[0])
                vis_std = (high_uncertainty - sample_range[0]) / (sample_range[1] - sample_range[0])
                vis_high_sdf = np.clip(vis_high_sdf, 0.0, 1.0)
                vis_std = np.clip(vis_std, 0.0, 1.0)

            if return_samples:
                map_visuals.samples = [vis_util.pointcloud(vis_sample_pos, cfloat=vis_high_sdf)]
            if return_uncertainty:
                map_visuals.uncertainty = [vis_util.pointcloud(vis_sample_pos, cfloat=vis_std)]

        return map_visuals

    def get_extract_pos(self, voxel_index):
        """
        Convert integer voxel indices to the world-space positions of their integration corners.

        :param voxel_index: (N, 3) integer voxel coordinates within the map grid.
        :return: (N, K, 3) float tensor of corner positions,
            where K = len(self.integration_offsets) (8 for the standard cube corners).
        """
        assert voxel_index.shape[1]==3
        # Lower corner of each voxel in world coordinates.
        coor = self.bound_min + voxel_index*self.voxel_size  # (N, 3)
        # Corner offsets relative to the voxel's lower corner, scaled to world units.
        offset = (torch.stack(self.integration_offsets)+0.5)*self.voxel_size  # (K, 3)
        # Broadcast instead of the former per-voxel Python loop: same result, O(1) Python overhead.
        corner_pos = coor.unsqueeze(1) + offset.unsqueeze(0)  # (N, K, 3)

        return corner_pos

    def integrate_keyframe_old(self, surface_xyz: torch.Tensor, surface_normal: torch.Tensor, do_optimize: bool = False, async_optimize: bool = False):
        """
        Integrate one keyframe's oriented surface points into the latent voxel map.

        :param surface_xyz:  (N, 3) x, y, z
        :param surface_normal: (N, 3) nx, ny, nz
        :param do_optimize: whether to do optimization (this will be slow though)
        :param async_optimize: whether to spawn a separate job to optimize.
            Note: the optimization is based on the point at this function call.
                  optimized result will be updated on the next function call after it's ready.
            Caveat: If two optimization thread are started simultaneously, results may not be accurate.
                    Although we explicitly ban this, user can also trigger this by call the function with async_optimize = True+False.
                    Please use consistent `async_optimize` during a SLAM session.
        :return: None (returning the pruning mask is currently disabled, see the end of this body).
        """
        assert surface_xyz.device == surface_normal.device == self.device, \
            f"Device of map {self.device} and input observation " \
            f"{surface_xyz.device, surface_normal.device} must be the same."

        # This lock prevents meshing thread reading error.
        self.modifying_lock.acquire()

        # -- 0. Update map if optimization thread is ready.
        # if not self.optimize_process.res_queue.empty():
        #     self.optimize_result_set.new_latent_vecs = self.optimize_process.res_queue.get()
        #     self._update_optimize_result_set(deintegrate_old=True)

        # -- 1. Allocate new voxels --
        # Map world coordinates to (fractional) voxel coordinates, then to linear grid ids.
        surface_xyz_zeroed = surface_xyz - self.bound_min.unsqueeze(0)
        surface_xyz_normalized = surface_xyz_zeroed / self.voxel_size
        surface_grid_id = torch.ceil(surface_xyz_normalized).long() - 1 # here compute all voxels 
        surface_grid_id = self._linearize_id(surface_grid_id) # 65931

        # Remove the observations where it is too sparse.
        unq_mask = None
        if self.args.mapping.prune_min_vox_obs > 0:
            '''
            torch.unique: return, list, inverse_indices, count
            inverse_indices: representing the indices for where elements in the original input map to in the output; 
            otherwise, this function will only return a single tensor.
            '''
            # Keep only points whose voxel received more than prune_min_vox_obs observations.
            _, unq_inv, unq_count = torch.unique(surface_grid_id, return_counts=True, return_inverse=True) # 2471 valid voxels
            unq_mask = (unq_count > self.args.mapping.prune_min_vox_obs)[unq_inv]
            surface_xyz_normalized = surface_xyz_normalized[unq_mask]
            surface_grid_id = surface_grid_id[unq_mask] # 32391
            surface_normal = surface_normal[unq_mask]

        # Identify empty cells, fill the indexer.
        invalid_surface_ind = self.indexer[surface_grid_id] == -1 # find points not registered
        if invalid_surface_ind.sum() > 0:
            ## this means some points is not registered in self.indexer
            invalid_flatten_id = torch.unique(surface_grid_id[invalid_surface_ind])
            #TODO: We expand this because we want to create some dummy voxels which helps the mesh extraction.
            invalid_flatten_id = self._expand_flatten_id(invalid_flatten_id, ensure_valid=True)
            invalid_flatten_id = invalid_flatten_id[self.indexer[invalid_flatten_id] == -1] ## new located indexer
            self.allocate_block(invalid_flatten_id)

        def get_pruned_surface(enabled=True, lin_pos=None):
            # Prune useless surface points for quicker gathering (set to True to enable)
            # maintain one voxel one point.
            if enabled:
                encoder_voxel_pos_exp = self._expand_flatten_id(lin_pos, False)
                # encoder_voxel_pos_exp = lin_pos
                exp_indexer = torch.zeros_like(self.indexer)
                exp_indexer[encoder_voxel_pos_exp] = 1
                focus_mask = exp_indexer[surface_grid_id] == 1
                return surface_xyz_normalized[focus_mask], surface_normal[focus_mask]
            else:
                return surface_xyz_normalized, surface_normal

        # -- 2. Get all voxels whose confidence is lower than optimization threshold and encoder them --
        # Find my voxels conditions:
        #   1) Voxel confidence < Threshold
        #   2) Voxel is valid.
        #   3) Not optimized.
        #   4) There is surface points in the [-0.5 - 0.5] range of this voxel.
        # np.prod: np.product was a deprecated alias removed in NumPy 2.0.
        map_status = torch.zeros(np.prod(self.n_xyz), device=self.device, dtype=torch.short) # num of all voxels

        encoder_voxel_pos = self.latent_vecs_pos[torch.logical_and(self.voxel_obs_count < self.args.mapping.encoder_count_th,
                                                                   self.latent_vecs_pos >= 0)]
        map_status[encoder_voxel_pos] |= self.STATUS_CONF_BIT
        # self.map_status[surface_grid_id] |= self.STATUS_SURF_BIT

        if encoder_voxel_pos.size(0) > 0:
            pruned_surface_xyz_normalized, pruned_surface_normal = get_pruned_surface(
                enabled=True, lin_pos=encoder_voxel_pos)

            # Gather surface samples for encoder inference
            gathered_surface_latent_inds = []
            gathered_surface_xyzn = []
            # gatherd_surf_gid = []
            for offset in self.integration_offsets:
                # Each offset assigns points to one of the neighboring voxels they influence.
                _surface_grid_id = torch.ceil(pruned_surface_xyz_normalized + offset) - 1
                for dim in range(3):
                    _surface_grid_id[:, dim].clamp_(0, self.n_xyz[dim] - 1)
                surface_relative_xyz = pruned_surface_xyz_normalized - _surface_grid_id - self.relative_network_offset
                surf_gid = self._linearize_id(_surface_grid_id.long()) # grid index
                # gatherd_surf_gid.append(surf_gid)
                surface_latent_ind = self.indexer[surf_gid]
                in_focus_obs_mask = map_status[surf_gid] >= (self.STATUS_CONF_BIT)
                gathered_surface_latent_inds.append(surface_latent_ind[in_focus_obs_mask])
                gathered_surface_xyzn.append(torch.cat(
                    [surface_relative_xyz[in_focus_obs_mask],
                     pruned_surface_normal[in_focus_obs_mask]
                     ], dim=-1))
            gathered_surface_xyzn = torch.cat(gathered_surface_xyzn)
            gathered_surface_latent_inds = torch.cat(gathered_surface_latent_inds)
            # gatherd_surf_gid = torch.cat(gatherd_surf_gid)

            surface_blatent_mapping, pinds, pcounts = torch.unique(gathered_surface_latent_inds, return_inverse=True,
                                                                   return_counts=True)
            pcounts = pcounts.float()

            logging.info(f"{surface_blatent_mapping.size(0)} voxels will be updated by the encoder. "
                         f"Points/Voxel: avg = {pcounts.mean().item()}, "
                         f"min = {pcounts.min().item()}, "
                         f"max = {pcounts.max().item()}")

        map_status.zero_()
        '''
            # # -- 3. Get all voxels whose confidence is higher than optimization threshold and not marked and optimize them.
            # # Another important criterion is that current frame must have enough good observation.
            # # Find my voxels.
            # if do_optimize and (not self.optimize_process.is_busy() and self.optimize_process.res_queue.empty()) and \
            #         self.args.optim_n_iters > 0:
            #     optim_voxel_pos = self.latent_vecs_pos[torch.logical_and(self.voxel_obs_count >= self.args.encoder_count_th,
            #                                                              ~self.voxel_optimized)]
            #     optim_voxel_pos = optim_voxel_pos[optim_voxel_pos > 0]

            #     if optim_voxel_pos.size(0) > 0:
            #         map_status[optim_voxel_pos] |= self.STATUS_CONF_BIT
            #         pruned_surface_xyz_normalized, pruned_surface_normal = get_pruned_surface(
            #             enabled=True, lin_pos=optim_voxel_pos)

            #         # Gather surface samples for encoder inference
            #         gathered_latent_inds = []
            #         gathered_relative_xyz = []
            #         gathered_sdf = []

            #         for offset in self.integration_offsets:
            #             _surface_grid_id = torch.ceil(pruned_surface_xyz_normalized + offset) - 1
            #             for dim in range(3):
            #                 _surface_grid_id[:, dim].clamp_(0, self.n_xyz[dim] - 1)
            #             surface_relative_xyz = pruned_surface_xyz_normalized - _surface_grid_id - self.relative_network_offset
            #             lin_pos = self._linearize_id(_surface_grid_id.long())
            #             surface_latent_ind = self.indexer[lin_pos]
            #             in_focus_obs_mask = map_status[lin_pos] >= self.STATUS_CONF_BIT
            #             gathered_latent_inds.append(surface_latent_ind[in_focus_obs_mask])
            #             cur_rel_xyz = surface_relative_xyz[in_focus_obs_mask]
            #             cur_normal = pruned_surface_normal[in_focus_obs_mask]
            #             cur_sdf = torch.randn(cur_rel_xyz.size(0), device=cur_rel_xyz.device, dtype=torch.float32) * 0.05
            #             cur_rel_xyz = cur_rel_xyz + cur_sdf.unsqueeze(-1) * cur_normal
            #             gathered_relative_xyz.append(cur_rel_xyz)
            #             gathered_sdf.append(cur_sdf)

            #         gathered_latent_inds = torch.cat(gathered_latent_inds)
            #         gathered_relative_xyz = torch.cat(gathered_relative_xyz)
            #         gathered_sdf = torch.cat(gathered_sdf)

            #         latent_id_subset_uniques, latent_id_inv_mapping = torch.unique(gathered_latent_inds, return_inverse=True)
            #         latent_vecs_unique = self.latent_vecs[latent_id_subset_uniques]

            #         optimize_kwargs = {
            #             "args": self.args,
            #             "latent_vecs_unique": latent_vecs_unique,
            #             "latent_id_inv_mapping": latent_id_inv_mapping,
            #             "gathered_sdf": gathered_sdf,
            #             "gathered_relative_xyz": gathered_relative_xyz,
            #         }

            #         self.optimize_result_set.latent_ids = latent_id_subset_uniques
            #         if not async_optimize:
            #             self.optimize_result_set.new_latent_vecs = self.optimize_process.do_optimize(decoder=self.model.decoder, **optimize_kwargs)
            #             self._update_optimize_result_set(deintegrate_old=False)
            #         else:
            #             self.optimize_result_set.old_latent_vecs = torch.clone(latent_vecs_unique)
            #             self.optimize_result_set.old_latent_obs_counts = self.voxel_obs_count[latent_id_subset_uniques]
            #             self.optimize_process.job_queue.put(optimize_kwargs)

            #     # End if optim_voxel_pos.size(0) > 0
            # # End if do_optimize
        '''
        self.modifying_lock.release()
        # return unq_mask


if __name__ == "__main__":
    # Smoke-test entry point: parse the experiment configuration, build a ScanNet
    # sequence on the first CUDA device and run a single forward pass on frame 0.
    device = torch.device("cuda:0")
    parser = exp_util.ArgumentParserX()
    args = parser.parse_args()
    # Convert the nested config dicts into attribute-style namespaces.
    for group in ("mapping", "sequence_kwargs", "tracking"):
        setattr(args, group, exp_util.dict_to_args(getattr(args, group)))

    seq_dataset = ScannetSequence(args=args, path=args.sequence_kwargs.path,
                                  start_frame=0, end_frame=-1, device=device, load_gt=True)

    p = seq_dataset.forward(index=0)