import copy
import glob
import logging
import os
from collections import defaultdict
import cv2
import numpy as np
import torch
import torch.nn as nn
from fairnr.clib import (aabb_ray_intersect, inverse_cdf_sampling,
                         svo_ray_intersect, triangle_ray_intersect,
                         uniform_ray_sampling)
from system.ext import (estimate_normals, gradient_xy, remove_radius_outlier,
                        rgb_odometry, unproject_depth)
from trainer import make_encoder
from utils import data_utils, exp_util, motion_util, vis_util

logger = logging.getLogger(__name__)
MAX_DEPTH = 10000.0
TINY = 1e-9


class ShapeDataset():
    """
    A dataset that only returns data per shape.

    Each element holds the shape index, a constant camera intrinsic matrix
    and, when the corresponding files exist on disk, paths to per-shape
    intrinsic / global-feature data.
    """
    def __init__(self, 
                paths, 
                preload=True,
                repeat=1,
                subsample_valid=-1,
                ids=None):
        """
        :param paths: a single shape directory, or a text file listing one
                      shape directory per line.
        :param preload: load every shape into ``self.cache`` up front.
        :param repeat: number of copies of the data list (multi-GPU HACK).
        :param subsample_valid: keep every k-th shape when > -1.
        :param ids: unused here; kept for interface compatibility.
        """
        if os.path.isdir(paths):
            self.paths = [paths]
        else:
            # context manager so the list file handle is not leaked
            with open(paths) as path_file:
                self.paths = [line.strip() for line in path_file]

        self.subsample_valid = subsample_valid
        self.total_num_shape = len(self.paths)
        self.cache = None
        self.repeat = repeat

        ### in this exp, we use const intrinsic:
        # BUGFIX: this must be assigned *before* the preload loop below,
        # because _load_batch -> _load_shape reads self.full_intrinsic;
        # previously preload=True crashed with AttributeError.
        fx, fy, cx, cy = [481.2, 480.0, 319.50, 239.50]
        self.full_intrinsic = np.array([[fx, 0., cx, 0.],
                               [0., fy, cy, 0],
                               [0., 0, 1, 0],
                               [0, 0, 0, 1]])

        # -- load per-shape data
        _data_per_shape = {}
        _data_per_shape['shape'] = list(range(len(self.paths)))
        _ixts = self.find_intrinsics()
        _glbs = self.find_global()
        if len(_ixts) > 0:
            _data_per_shape['ixt'] = _ixts
        if len(_glbs) > 0:
            _data_per_shape['glb'] = _glbs

        if self.subsample_valid > -1:
            for key in _data_per_shape:
                _data_per_shape[key] = _data_per_shape[key][::self.subsample_valid]
            self.paths = self.paths[::self.subsample_valid]
            self.total_num_shape = len(self.paths)
        
        # group the data..
        data_list = []
        for r in range(repeat):
            # HACK: making several copies to enable multi-GPU usage.
            if r == 0 and preload:
                self.cache = []
                logger.info('pre-load the dataset into memory.')

            for shape_id in range(self.total_num_shape):
                element = {}
                for key in _data_per_shape:
                    element[key] = _data_per_shape[key][shape_id]
                data_list.append(element)

                if r == 0 and preload:
                    # cache one entry per unique shape (first copy only)
                    self.cache += [self._load_batch(data_list, shape_id)]

        # group the data together
        self.data = data_list

    def find_intrinsics(self):
        """Return per-shape intrinsic file paths (either naming convention)."""
        ixt_list = []
        for path in self.paths:
            if os.path.exists(path + '/intrinsic.txt'):
                ixt_list.append(path + '/intrinsic.txt')
            elif os.path.exists(path + '/intrinsics.txt'):
                ixt_list.append(path + '/intrinsics.txt')
        return ixt_list

    def find_global(self):
        """Return per-shape global-feature file paths, when present."""
        glb_list = []
        for path in self.paths:
            if os.path.exists(path + '/global.txt'):
                glb_list.append(path + '/global.txt')
        return glb_list

    def _load_shape(self, packed_data):
        """Build the per-shape dict: intrinsics, id, optional global index."""
        intrinsics = self.full_intrinsic
        shape_id = packed_data['shape']
        shape_data = {'intrinsics': intrinsics, 'id': shape_id}
        if packed_data.get('glb', None) is not None:   # additional global feature (if any)
           shape_data['global_index'] = np.loadtxt(packed_data['glb']).astype('int64')
        return shape_data

    def _load_batch(self, data, index):
        """Return ``(index, per-shape data)`` for one element of ``data``."""
        return index, self._load_shape(data[index])

    def __getitem__(self, index):
        if self.cache is not None:
            # cache holds one entry per unique shape; repeats wrap around
            return self.cache[index % self.total_num_shape][0], \
                   self.cache[index % self.total_num_shape][1]
        return self._load_batch(self.data, index)

    def __len__(self):
        return len(self.data)

class ShapeViewDataset(ShapeDataset):
    """
    A dataset contains a series of images renderred offline for an object.

    On top of the per-shape data from ShapeDataset, each element carries
    per-view RGB image paths, extrinsic file paths, the constant intrinsic
    matrix and, optionally, depth and mask data.
    """

    def __init__(self, 
                paths, 
                views,
                num_view,
                subsample_valid=-1,
                resolution=None, 
                load_depth=False,
                load_mask=False,
                train=True,
                preload=False,
                repeat=1,
                binarize=True,
                bg_color="1,1,1",
                min_color=-1,
                ids=None):
        """
        :param views: view indices used by ``select()`` when discovering files.
        :param num_view: number of views returned per batch.
        :param resolution: "HxW" string or a single int (square image).
        :param load_depth / load_mask: whether per-view depth / mask is loaded.
        :param binarize: preload via an on-disk .npz cache when True.
        :param bg_color: background color, "r,g,b" string or scalar.
        :param min_color: -1 means colors live in [-1, 1].
        """
        # per-shape bookkeeping is done by the parent; preloading happens here
        super().__init__(paths, False, repeat, subsample_valid, ids)

        self.train = train
        self.load_depth = load_depth
        self.load_mask = load_mask
        self.views = views
        self.num_view = num_view

        if isinstance(resolution, str):
            self.resolution = [int(r) for r in resolution.split('x')]
        else:
            self.resolution = [resolution, resolution]
        self.world2camera = True
        self.cache_view = None
        
        bg_color = [float(b) for b in bg_color.split(',')] \
            if isinstance(bg_color, str) else [bg_color]
        if min_color == -1:
            # colors live in [-1, 1]; remap the background accordingly
            bg_color = [b * 2 - 1 for b in bg_color]
        if len(bg_color) == 1:
            bg_color = bg_color + bg_color + bg_color
        self.bg_color = bg_color
        self.min_color = min_color
        self.apply_mask_color = (self.bg_color[0] >= -1) & (self.bg_color[0] <= 1)  # if need to apply

        # -- load per-view data
        _data_per_view = {}
        # rgb files are "rgb/<frame-id>.<ext>"; sort numerically by frame id
        _data_per_view['rgb'] = [sorted([f"rgb/{t}" for t in os.listdir(paths + "/rgb")], key=lambda t: int(t[4:].split(".")[0]))]
        _data_per_view['ext'] = [[f"extrinsic/{t}.txt" for t in range(len(_data_per_view['rgb'][0]))]]
        _data_per_view['ixt_v'] = [[self.full_intrinsic for i in range(len(_data_per_view['rgb'][0]))]]
        if self.load_depth:
            _data_per_view['dep'] = [[f"depth/{t}.png" for t in range(len(_data_per_view['rgb'][0]))]]
        if self.load_mask:
            _data_per_view['mask'] = self.find_mask()
        _data_per_view['view'] = self.summary_view_data(_data_per_view)
        
        def add_base_root(file_path):
            # prepend the dataset root to every relative file path
            tmp = []
            for i in file_path:
                tmp.append(os.path.join(paths, i))
            return tmp
        _data_per_view['rgb'][0] = add_base_root(_data_per_view['rgb'][0])
        _data_per_view['ext'][0] = add_base_root(_data_per_view['ext'][0])
        if self.load_depth:
            # BUGFIX: 'dep' only exists when load_depth=True; indexing it
            # unconditionally raised a KeyError for the default load_depth=False.
            _data_per_view['dep'][0] = add_base_root(_data_per_view['dep'][0])

        # group the data.
        _index = 0
        for r in range(repeat):
            # HACK: making several copies to enable multi-GPU usage.
            if r == 0 and preload:
                self.cache = []
                logger.info('pre-load the dataset into memory.')

            for shape_id in range(self.total_num_shape):
                element = {}
                total_num_view = len(_data_per_view['rgb'][shape_id])
                # view shuffling is disabled: keep the original frame order
                perm_ids = np.arange(total_num_view)
                for key in _data_per_view:
                    element[key] = [_data_per_view[key][shape_id][i] for i in perm_ids]
                self.data[_index].update(element)

                if r == 0 and preload:
                    phase_name = f"{'train' if self.train else 'valid'}" + \
                                f".{self.resolution[0]}x{self.resolution[1]}" + \
                                f"{'.d' if load_depth else ''}" + \
                                f"{'.m' if load_mask else ''}" + \
                                f"{'b' if not self.apply_mask_color else ''}" + \
                                "_full"
                    logger.info("preload {}-{}".format(shape_id, phase_name))
                    if binarize:
                        cache = self._load_binary(shape_id, np.arange(total_num_view), phase_name)
                    else:
                        cache = self._load_batch(self.data, shape_id, np.arange(total_num_view))
                    self.cache += [cache]
                _index += 1

        # per-element infinite view-index iterators.
        # NOTE: random per-epoch shuffling is disabled; every copy of a shape
        # walks a fixed window of `num_view` consecutive views.
        self.data_index = []
        for i, d in enumerate(self.data):
            copy_id = i // self.total_num_shape
            index_list = []
            for j in range(copy_id * num_view, copy_id * num_view + num_view):
                index_list.append(j % len(d['rgb']))
            self.data_index.append(
                data_utils.InfIndex(index_list, shuffle=False)
            )

    def _load_binary(self, id, views, phase='train'):
        """
        Load (or build and persist) an .npz cache of one shape's batch.
        Falls back to a fresh _load_batch when the cache is missing/corrupt.
        """
        # BUGFIX: self.data[id]['shape'] is an integer index (see ShapeDataset),
        # so dirname() on it raised a TypeError; derive the root from the
        # shape's directory path instead.
        root = os.path.dirname(self.paths[id])
        npzfile = os.path.join(root, '{}.npz'.format(phase))
        try:
            with np.load(npzfile, allow_pickle=True) as f:
                return f['cache']
        except Exception:
            # best-effort: any failure (missing/corrupt cache) rebuilds it
            cache = self._load_batch(self.data, id, views)
            if data_utils.get_rank() == 0:
                np.savez(npzfile, cache=cache)
            return cache

    def select(self, file_list):
        """Keep only the views in ``self.views`` from each per-shape file list."""
        if len(file_list[0]) == 0:
            raise FileNotFoundError
        return [[files[i] for i in self.views] for files in file_list]
    
    def find_rgb(self):
        """Locate rendered RGB images under rgb/ (fallback: color/)."""
        try:
            return self.select([sorted(glob.glob(path + '/rgb/*.*g')) for path in self.paths])
        except FileNotFoundError:
            try:
                return self.select([sorted(glob.glob(path + '/color/*.*g')) for path in self.paths])
            except FileNotFoundError:
                raise FileNotFoundError("CANNOT find rendered images.")
    
    def find_depth(self):
        """Locate per-view depth maps (EXR) under depth/."""
        try:
            return self.select([sorted(glob.glob(path + '/depth/*.exr')) for path in self.paths])
        except FileNotFoundError:
            raise FileNotFoundError("CANNOT find estimated depths images") 

    def find_mask(self):
        """Locate precomputed per-view masks under mask/."""
        try:
            return self.select([sorted(glob.glob(path + '/mask/*')) for path in self.paths])
        except FileNotFoundError:
            raise FileNotFoundError("CANNOT find precomputed mask images")

    def find_extrinsics(self):
        """Locate camera matrices; pose/ files are camera2world (flag flipped)."""
        try:
            return self.select([sorted(glob.glob(path + '/extrinsic/*.txt')) for path in self.paths])
        except FileNotFoundError:
            try:
                self.world2camera = False
                return self.select([sorted(glob.glob(path + '/pose/*.txt')) for path in self.paths])
            except FileNotFoundError:
                raise FileNotFoundError('world2camera or camera2world matrices not found.')   
    
    def find_intrinsics_per_view(self):
        """Locate per-view intrinsic files, or None when absent."""
        try:
            return self.select([sorted(glob.glob(path + '/intrinsic/*.txt')) for path in self.paths])
        except FileNotFoundError:
            return None

    def summary_view_data(self, _data_per_view):
        """Check all per-view lists agree in length; return view-id lists."""
        keys = [k for k in _data_per_view if _data_per_view[k] is not None]
        num_of_objects = len(_data_per_view[keys[0]])
        for k in range(num_of_objects): 
            assert len(set([len(_data_per_view[key][k]) for key in keys])) == 1, "number of views must be consistent."
        return [list(range(len(_data_per_view[keys[0]][k]))) for k in range(num_of_objects)]

    def num_tokens(self, index):
        """Number of views returned per batch (constant)."""
        return self.num_view

    def _load_view(self, packed_data, view_idx):
        """Load one view: RGB, extrinsics, intrinsics, optional depth/mask."""
        image, uv, ratio = data_utils.load_rgb(
            packed_data['rgb'][view_idx], 
            resolution=self.resolution,
            bg_color=self.bg_color,
            min_rgb=self.min_color)
        rgb, alpha = image[:3], image[3]  # C x H x W for RGB
        extrinsics = data_utils.load_matrix(packed_data['ext'][view_idx]) 
        # NOTE(review): hard-coded override — extrinsic files are treated as
        # camera2world regardless of how they were discovered; confirm.
        self.world2camera = False
        extrinsics = motion_util.parse_extrinsics(extrinsics, self.world2camera).astype('float32') 
        intrinsics = packed_data['ixt_v'][view_idx].astype(np.float32)

        z, mask = None, None
        if packed_data.get('dep', None) is not None:
            # depth PNGs store millimeter-style integers; /5000 is the
            # dataset's depth scale
            z = data_utils.load_depth(packed_data['dep'][view_idx], resolution=self.resolution)/5000. #dataset setting
        if packed_data.get('mask', None) is not None:
            mask = data_utils.load_mask(packed_data['mask'][view_idx], resolution=self.resolution)
            if self.apply_mask_color:   # we can also not apply mask
                rgb = rgb * mask[None, :, :] + (1 - mask[None, :, :]) * np.asarray(self.bg_color)[:, None, None]

        return {
            'path': packed_data['rgb'][view_idx],
            'view': view_idx,
            'uv': uv.reshape(2, -1), 
            'colors': rgb.reshape(3, -1).transpose(1,0), 
            'alpha': alpha.reshape(-1), 
            'extrinsics': extrinsics,
            'intrinsics': intrinsics,
            'depths': z.reshape(-1) if z is not None else None,
            'mask': mask.reshape(-1) if mask is not None else None,
            'size': np.array([rgb.shape[1], rgb.shape[2]] + ratio, dtype=np.float32)
        }

    def _load_batch(self, data, index, view_ids=None):
        """Return (index, shape data, [view data]) for one shape."""
        if view_ids is None:
            view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
        return index, self._load_shape(data[index]), [self._load_view(data[index], view_id) for view_id in view_ids]

    def get_base_sequence(self, index, view_ids=None):
        """Like _load_batch, but serves deep copies from the cache if present."""
        if self.cache is not None:
            view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
            # deepcopy so callers can mutate the batch without poisoning the cache
            return copy.deepcopy(self.cache[index % self.total_num_shape][0]), \
                   copy.deepcopy(self.cache[index % self.total_num_shape][1]), \
                  [copy.deepcopy(self.cache[index % self.total_num_shape][2][i]) for i in view_ids]
        return self._load_batch(self.data, index, view_ids)

    def __getitem__(self, index):
        # NOTE(review): interprets `index` as a view of the FIRST shape only —
        # presumably a single-sequence demo shortcut; confirm for multi-shape use.
        return self._load_view(self.data[0], index)

    def collater(self, samples):
        """Stack per-view arrays across samples into batched tensors."""
        # NOTE(review): ShapeDataset defines no collater; this super() call
        # relies on a base-class method outside this file — confirm.
        results = super().collater(samples)
        if results is None:
            return results

        for key in samples[0][2][0]:
            if key == 'path':
                results[key] = [[d[key] for d in s[2]] for s in samples]

            elif samples[0][2][0][key] is not None:
                results[key] = torch.from_numpy(
                    np.array([[d[key] for d in s[2]] for s in samples])
                )
                
        results['colors'] = results['colors'].transpose(2, 3)
        if results.get('full_rgb', None) is not None:
            results['full_rgb'] = results['full_rgb'].transpose(2, 3)
        return results

class Voxelizer(nn.Module):
    """
    NSVF-style sparse voxel map over a fixed axis-aligned bound.

    Maintains voxel centers, unique corner keys, the per-voxel 8-corner index
    table and a per-corner feature table (``values``) that is filled from
    image features projected onto the corners.
    """
    def __init__(self, args, device="cpu") -> None:
        """
        :param args: config exposing voxel_size, bound_min, bound_max,
                     depth_cut_min/max and prune_min_vox_obs.
        :param device: torch device hosting every buffer.
        """
        super().__init__()

        self.args = args
        self.device = device
        self.voxel_size = args.voxel_size
        # number of voxel centers per axis spanning [bound_min, bound_max]
        steps = ((np.asarray(args.bound_max) - np.asarray(args.bound_min)) / 
            self.voxel_size).round().astype('int64') + 1
        self.n_xyz = steps
        x, y, z = [c.reshape(-1).astype('float32') for c in np.meshgrid(np.arange(steps[0]), np.arange(steps[1]), np.arange(steps[2]))]
        points_index_xyz = torch.from_numpy(np.stack([x, y, z]).T).long().to(device)
        points_index = self._linearize_id(points_index_xyz)
        x, y, z = x * self.voxel_size + np.asarray(args.bound_min)[0], \
                y * self.voxel_size + np.asarray(args.bound_min)[1], \
                z * self.voxel_size + np.asarray(args.bound_min)[2]
        fine_points = torch.from_numpy(np.stack([x, y, z]).T.astype('float32')).to(device)
        # transform from voxel centers to voxel corners (key/values)
        fine_coords, _ = motion_util.discretize_points(fine_points, self.voxel_size * .5) 
        fine_keys0 = motion_util.offset_points(fine_coords, 1.0).reshape(-1, 3) # (N*8, 3); tight cubes, so corners are shared between neighbors
        fine_keys, fine_feats = torch.unique(fine_keys0, dim=0, sorted=True, return_inverse=True) # keys: unique corner coords
        fine_feats = fine_feats.reshape(-1, 8) # (N, 8): for every voxel, the indices of its eight corners
        num_keys = torch.scalar_tensor(fine_keys.size(0)).long() # number of unique corners
        
        # 6-neighborhood offsets used to dilate newly allocated voxels
        self.mesh_update_affected = [torch.tensor([t], device=self.device)
                                     for t in [[-1, 0, 0], [1, 0, 0],
                                               [0, -1, 0], [0, 1, 0],
                                               [0, 0, -1], [0, 0, 1]]]
        # use the module-level logger for consistency with the rest of the file
        logger.info(f"Map size Nx = {self.n_xyz[0]}, Ny = {self.n_xyz[1]}, Nz = {self.n_xyz[2]}")
        self.register_buffer("points", fine_points)          # voxel centers
        self.register_buffer("points_index", points_index)   # linearized ids of voxel centers
        self.register_buffer("keys", fine_keys.long())       # id used to find voxel corners/embeddings
        self.register_buffer("feats", fine_feats.long())     # for each voxel, 8 voxel corner ids
        self.register_buffer("num_keys", num_keys)           # num of unique corners
        self.register_buffer("keep", fine_feats.new_zeros(fine_feats.size(0)).long())  # 1 = voxel observed / kept
        # BUGFIX: np.product was removed in NumPy 2.0; np.prod is the
        # canonical, version-stable spelling.
        self.register_buffer("indexer", torch.ones(np.prod(self.n_xyz), device=device, dtype=torch.long) * -1)
        self.register_buffer("values", torch.zeros((num_keys, 256), device=device, dtype=torch.float))
        self.bound_min = fine_points[0]
        self.bound_max = fine_points[-1]
        ##TODO may later remove from the dataset.
        self.img_encoder = make_encoder(num_layers=3).to(self.device)
        self.img_encoder.eval() ### NOTE: here we do not re-train resnet.
        self.rgb_norm = vis_util.get_nor_img()
    
    def _linearize_id(self, xyz: torch.Tensor):
        """
        :param xyz: (N, 3) long grid coordinates.
        :return: (N, ) linearized id to be accessed in self.indexer
        """
        return xyz[:, 2] + self.n_xyz[-1] * xyz[:, 1] + (self.n_xyz[-1] * self.n_xyz[-2]) * xyz[:, 0]

    def _unlinearize_id(self, idx: torch.Tensor):
        """
        :param idx: (N, ) linearized id for access in self.indexer
        :return: xyz (N, 3) id to be indexed in 3D
        """
        return torch.stack([idx // (self.n_xyz[1] * self.n_xyz[2]),
                            (idx // self.n_xyz[2]) % self.n_xyz[1],
                            idx % self.n_xyz[2]], dim=-1)

    def allocate_block(self, idx: torch.Tensor):
        """
        Mark the voxels whose linearized ids appear in ``idx`` as kept.
        :param idx: (N, ) linearized voxel ids.
        NOTE: this will not check index overflow!
        """
        # Vectorized replacement for the original per-id `.eq().nonzero()`
        # python loop (O(N*M) -> single pass). The linearized ids stored in
        # self.points_index are unique, so torch.isin marks exactly the
        # voxels the loop touched.
        self.keep[torch.isin(self.points_index, idx)] = 1

    def preprocess(self, samples):
        """
        Integrate a batch of views into the map and build NSVF encoder state.

        Steps: clip depths to the configured range, back-project each view to
        world points, allocate observed voxels, then project voxel corners
        into every view and average the valid image features per corner.
        :return: (sample, encoder_states) with voxel_vertex_emb filled in.
        """
        _, intrinsic_id, sample = samples
        num_views = len(sample)
        array_name = ["colors", "uv", "alpha", 'extrinsics','intrinsics', 'depths']
        if isinstance(sample[0]['colors'], np.ndarray):
            transfer = lambda x: torch.from_numpy(x).to(self.device)
            for i in range(num_views):
                for j in array_name:
                    sample[i][j]=transfer(sample[i][j])
        # use bound to aviod outliers
        for i in range(num_views):
            sample[i]['depths'][torch.logical_or(sample[i]['depths'] < self.args.depth_cut_min,
                                sample[i]['depths'] > self.args.depth_cut_max)] = np.nan
            tracker_pc,_ = self.tracker(sample[i]) # TODO: add normal to calculate
            # NOTE: transpose_ mutates tracker_pc in place before the matmul
            opt_depth = torch.inverse(sample[i]['extrinsics'])@tracker_pc.transpose_(1,0) # pose@Pw
            self.integrate_keyframe(opt_depth[:3,:].T, False, False)

        logger.info(f"integrate key frames with voxel_num: {self.keep.sum()}")

        encoder_states = self.get_info_as_NSVF()    #   voxel_vertex_idx, voxel_center_xyz, voxel_pos_unique
        pos_feat_l = []
        valid_mask_l = []

        for index in range(num_views):
            uv = self.projection(encoder_states["voxel_pos_unique"], sample[index])
            # get uv valid_index
            valid_mask = motion_util.get_valid_uv(uv, w=int(sample[index]['size'][1]), h = int(sample[index]['size'][0]))
            # DEBUG: dump the projected corner positions for visual inspection
            test_img = np.zeros((480,640,3))
            for i in uv:
                test_img = cv2.circle(test_img, i.cpu().numpy().astype(np.uint16), 1, (255, 0, 0), 1)
            cv2.imwrite("test.jpg", test_img)

            pos_feat = self.get_corresponding_feat(uv, sample[index]['colors'].reshape(3, int(sample[index]['size'][0]), -1))
            valid_mask_l.append(valid_mask.float())
            pos_feat_l.append(pos_feat)
        # element-wise mean: sum features over views, divide by the number of
        # views in which each corner was actually visible (>= 1 to avoid /0)
        valid_mask = torch.clamp_min(torch.stack(valid_mask_l).sum(dim=0), 1.) 
        pos_feat = torch.stack(pos_feat_l).sum(dim=0).squeeze().permute(1,0) / valid_mask # only use valid part to mean

        ## update embedding 
        self.update_feat(pos_feat, encoder_states)

        return sample, encoder_states

    def projection(self, Pw, sample):
        """Project world points Pw (N, 3) into a view; returns pixel uv (N, 2)."""
        K = sample['intrinsics']
        extinsic = sample['extrinsics']
        Pw = motion_util.homo_vec(Pw)
        uv = K@extinsic@Pw
        uv = (uv / uv[2,:])[:2,:].T
        return uv

    def tracker(self, sample, homo=True):
        """
        Back-project one view's depth map to a filtered world point cloud.
        :param homo: return (N, 4) homogeneous points when True, else (N, 3).
        :return: (points, per-point rgb) with NaN and radius outliers removed.
        """
        rgb_data = sample['colors'].reshape(3, int(sample['size'][0]), int(sample['size'][1])).permute(1,2,0)
        intrinsics = sample['intrinsics']
        cur_intensity = torch.mean(rgb_data, dim=-1)
        cur_depth = sample['depths'].reshape(int(sample['size'][0]), int(sample['size'][1]))
        cur_intensity, cur_depth, _ = self._make_image_pyramid(cur_intensity, cur_depth)
        cur_rgb = rgb_data.permute(2, 0, 1)       # (3, H, W)
        fx,fy,cx,cy = intrinsics[0,0].item(), intrinsics[1,1].item(), intrinsics[0,2].item(), intrinsics[1,2].item()
        pc_scale = 1.0
        pc_data = torch.nn.functional.interpolate(cur_depth[0].unsqueeze(0).unsqueeze(0),
                                                  scale_factor=pc_scale, mode='nearest',
                                                  recompute_scale_factor=False).squeeze(0).squeeze(0)
        cur_rgb = torch.nn.functional.interpolate(cur_rgb.unsqueeze(0), scale_factor=pc_scale, 
                                                    mode='bilinear',align_corners=True, 
                                                    recompute_scale_factor=False).squeeze(0)
        pc_data = unproject_depth(pc_data, fx * pc_scale, fy * pc_scale,
                            cx * pc_scale, cy * pc_scale)
        pc_data = torch.cat([pc_data, torch.zeros((pc_data.size(0), pc_data.size(1), 1), device=pc_data.device)], dim=-1)
        pc_data = pc_data.reshape(-1, 4)
        cur_rgb = cur_rgb.permute(1, 2, 0)      # (W, H, 3)
        cur_rgb = cur_rgb.reshape(-1, 3)

        # drop pixels whose depth was NaN (clipped or invalid)
        nan_mask = ~torch.isnan(pc_data[..., 0])
        pc_data = pc_data[nan_mask]
        cur_rgb = cur_rgb[nan_mask]

        with torch.cuda.device(self.device):
            pc_data_valid_mask = remove_radius_outlier(pc_data, 16, 0.05)
            pc_data = pc_data[pc_data_valid_mask]
            cur_rgb = cur_rgb[pc_data_valid_mask]
        if homo:
            pc_data[:,3] = torch.ones_like(pc_data[:,3])
        else:
            pc_data = pc_data[:,:3]
        return pc_data, cur_rgb

    def _make_image_pyramid(self, intensity_img: torch.Tensor, depth_img: torch.Tensor):
        """
        Build a 3-level pyramid (full, 1/2, 1/4) of intensity, depth and
        intensity gradients; intensity is bilinear, depth nearest-neighbor.
        """
        d0_w, d0_h = intensity_img.size(1), intensity_img.size(0)
        d1_w, d1_h = d0_w // 2, d0_h // 2
        d2_w, d2_h = d1_w // 2, d1_h // 2
        d0_intensity = intensity_img.view(1, 1, d0_h, d0_w)
        d0_depth = depth_img.view(1, 1, d0_h, d0_w)
        d1_intensity = torch.nn.functional.interpolate(d0_intensity, (d1_h, d1_w), mode='bilinear',align_corners=True)
        d1_depth = torch.nn.functional.interpolate(d0_depth, (d1_h, d1_w), mode='nearest')
        d2_intensity = torch.nn.functional.interpolate(d1_intensity, (d2_h, d2_w), mode='bilinear',align_corners=True)
        d2_depth = torch.nn.functional.interpolate(d1_depth, (d2_h, d2_w), mode='nearest')
        d0_gradient = gradient_xy(d0_intensity.squeeze(0).squeeze(0))
        d1_gradient = gradient_xy(d1_intensity.squeeze(0).squeeze(0))
        d2_gradient = gradient_xy(d2_intensity.squeeze(0).squeeze(0))
        return [t.squeeze(0).squeeze(0) for t in [d0_intensity, d1_intensity, d2_intensity]], \
               [t.squeeze(0).squeeze(0) for t in [d0_depth, d1_depth, d2_depth]], \
               [d0_gradient, d1_gradient, d2_gradient]

    def integrate_keyframe(self, surface_xyz: torch.Tensor, do_optimize: bool = False, async_optimize: bool = False):
        """
        Register surface observations into the map: discretize world points
        to grid ids, drop sparse observations and allocate unseen voxels.
        :param surface_xyz: (N, 3) world-space surface points.
        """
        assert surface_xyz.device == self.device, \
            f"Device of map {self.device} and input observation " \
            f"{surface_xyz.device} must be the same."

        # -- 1. Allocate new voxels --
        surface_xyz_zeroed = surface_xyz - self.bound_min.unsqueeze(0)
        surface_xyz_normalized = surface_xyz_zeroed / self.voxel_size
        surface_grid_id = torch.ceil(surface_xyz_normalized).long() - 1 # here compute all voxels 
        surface_grid_id = self._linearize_id(surface_grid_id)

        # Remove the observations where it is too sparse.
        unq_mask = None
        if self.args.prune_min_vox_obs > 0:
            # torch.unique with return_inverse maps each observation back to
            # its voxel, so unq_count[unq_inv] is the per-point observation count
            _, unq_inv, unq_count = torch.unique(surface_grid_id, return_counts=True, return_inverse=True)
            unq_mask = (unq_count > self.args.prune_min_vox_obs)[unq_inv]
            surface_xyz_normalized = surface_xyz_normalized[unq_mask]
            surface_grid_id = surface_grid_id[unq_mask]

        # Identify empty cells, fill the indexer.
        invalid_surface_ind = self.indexer[surface_grid_id] == -1 # find points not registered
        if invalid_surface_ind.sum() > 0:
            ## some points are not registered in self.indexer yet
            invalid_flatten_id = torch.unique(surface_grid_id[invalid_surface_ind])
            #TODO: We expand this because we want to create some dummy voxels which helps the mesh extraction.
            invalid_flatten_id = self._expand_flatten_id(invalid_flatten_id, ensure_valid=True)
            invalid_flatten_id = invalid_flatten_id[self.indexer[invalid_flatten_id] == -1] ## newly located indexer
            self.allocate_block(invalid_flatten_id)

    def _expand_flatten_id(self, base_flatten_id: torch.Tensor, ensure_valid: bool = False):
        """
        Dilate a set of linearized voxel ids by the 6-neighborhood, clamping
        to the grid; optionally keep only ids already present in the indexer.
        """
        expanded_flatten_id = [base_flatten_id]
        updated_pos = self._unlinearize_id(base_flatten_id) # pos -> position
        for affected_offset in self.mesh_update_affected:
            rs_id = updated_pos + affected_offset
            for dim in range(3):
                rs_id[:, dim].clamp_(0, self.n_xyz[dim] - 1)
            rs_id = self._linearize_id(rs_id)
            if ensure_valid:
                rs_id = rs_id[self.indexer[rs_id] != -1]
            expanded_flatten_id.append(rs_id)
        expanded_flatten_id = torch.unique(torch.cat(expanded_flatten_id))
        return expanded_flatten_id

    def get_info_as_NSVF(self):
        '''
        Usage: use this func to get voxel info for the kept voxels.
        NOTE: the voxel definition is the same as NSVF.
        '''
        points = self.points[self.keep.bool()]
        feats  = self.feats[self.keep.bool()]
        # unique corner ids referenced by the kept voxels
        unique_feat_idx, _ = torch.unique(feats.flatten(), sorted=True, return_inverse=True)
        unique_cor_idx = self.keys[unique_feat_idx]
        # corner grid coords -> world coords (corners live on a half-voxel grid)
        voxel_pos_unique = unique_cor_idx*self.voxel_size*0.5+self.bound_min

        encoder_states = {
            'voxel_vertex_idx': feats, # (K, 8)
            'voxel_center_xyz': points, # (K, 3) voxel centers
            'voxel_pos_unique': voxel_pos_unique, # (N, 3) voxel corner positions
            'unique_feat_idx': unique_feat_idx # used to index self.keys & self.values
        }
        return encoder_states

    def update_feat(self, pose_feat, encoder_states):
        '''
        Write the per-corner features into self.values and expose the table
        to the encoder state.
        NOTE: this method was previously defined twice with identical bodies;
        the duplicate definition has been removed.
        '''
        self.values[encoder_states["unique_feat_idx"]] = pose_feat # N 256
        encoder_states["voxel_vertex_emb"] = self.values

    def get_corresponding_feat(self, uv, image, image_size=None):
        '''
            get uv's corresponding feat using grid sample;
            uv: B N 2, index of pixel: from 0 to image shape;
            img: path or tensor;
        '''
        if isinstance(image, str):
            rgb_data = cv2.imread(str(image))
            rgb_data = cv2.cvtColor(rgb_data, cv2.COLOR_BGR2RGB) # eqs imageio.imread()[..., :3]
            ## tobe NCHW
            rgb = self.rgb_processer(rgb_data).unsqueeze(0).to(self.device)
        else:
            ## image is tensor with HWC
            if image.shape[-1] == 3:
                image = image.permute(2,0,1)
            rgb = self.rgb_norm(image)
        if len(rgb.shape) < 4:
            rgb = rgb.unsqueeze(0)
        with torch.no_grad():
            self.img_encoder(rgb)
            if len(uv.shape) < 3:
                uv = uv.unsqueeze(0) # 1 N 2
            image_shape = torch.tensor([640., 480.], device=uv.device)
            vtx_feat = self.img_encoder.index(uv, image_size=image_shape) # TODO replace with w, h
        return vtx_feat

# class sample_dataset(torch.utils.data.Dataset):
#     def __init__(self, args) -> None:
#         super().__init__()
        

if __name__ == "__main__":
    # Smoke test on the ICL-NUIM demo sequence: build the view dataset,
    # fetch one base batch of views, then run voxelizer preprocessing on it.
    shape_views = 1
    views_per_batch = 4
    resolution_str = "480x640"
    selected_views = [0, 10, 20, 30]
    demo_set = ShapeViewDataset("/data/yangchen/ICL-NUIM/Di-Fusion_demo",
                                shape_views, views_per_batch,
                                resolution=resolution_str,
                                load_depth=True, preload=False)
    demo_batch = demo_set.get_base_sequence(index=0, view_ids=selected_views)
    print("show me the keys")
    # NOTE(review): Voxelizer reads args.voxel_size etc., so args=None is
    # expected to fail here — presumably a parsed config should be passed.
    demo_vox = Voxelizer(args=None, device=torch.device("cuda:0"))
    demo_vox.preprocess(demo_batch)
