# Copyright (c) Phigent Robotics. All rights reserved.
from .bevdet import BEVStereo4D, BEVDepth4D

import torch
from mmdet.models import DETECTORS
from mmdet.models.builder import build_loss
from mmcv.cnn.bricks.conv_module import ConvModule
from mmdet.models.backbones.resnet import ResNet

from torch import nn
import numpy as np

# additional imports
from .. import builder
import MinkowskiEngine as ME
import torch.nn.functional as F

class MinkowskiSoftplus(nn.Module):
    """Softplus activation for MinkowskiEngine sparse tensors.

    Applies ``softplus`` to the feature matrix of a sparse tensor while
    leaving its coordinate map key and coordinate manager untouched, so the
    output stays aligned with the input's sparse layout.
    """

    def forward(self, x):
        activated_feats = nn.functional.softplus(x.F)
        return ME.SparseTensor(
            activated_feats,
            coordinate_map_key=x.coordinate_map_key,
            coordinate_manager=x.coordinate_manager,
        )
@DETECTORS.register_module()
class BEVStereo4DOCC(BEVStereo4D):
    """Camera-LiDAR fused occupancy prediction head built on BEVStereo4D.

    The camera branch produces a dense 3D voxel feature volume through the
    inherited view transformer; the LiDAR branch runs a MinkowskiEngine
    sparse backbone/neck over the voxelized point cloud. The two are fused
    into a dense (200, 200, 16) grid and classified into ``num_classes``
    semantic occupancy classes.
    """

    def __init__(self,
                 loss_occ=None,
                 out_dim=32,
                 dataset_type='nuscenes',
                 use_mask=False,
                 num_classes=18,
                 use_predicter=True,
                 class_wise=False,
                 lidar_backbone = None,
                 lidar_neck = None,
                 fusion = None,
                 loss_bce_weight = None,

                 **kwargs):
        """Build the fused occupancy model.

        Args:
            loss_occ (dict): Config for the occupancy classification loss.
            out_dim (int): Channel width of the dense camera feature volume.
            dataset_type (str): 'nuscenes' or 'waymo'; selects the free-space
                class id (17 vs 15) and the LiDAR coordinate reordering.
            use_mask (bool): If True, supervise only camera-visible voxels.
            num_classes (int): Number of semantic classes (incl. free space).
            use_predicter (bool): If True, append an MLP classifier head.
            class_wise (bool): Stored flag; unused in this file.
            lidar_backbone (dict): Sparse LiDAR backbone config.
            lidar_neck (dict): Sparse LiDAR neck config.
            fusion (dict): Camera-LiDAR fusion layer config.
            loss_bce_weight (float): Weight for the scene-completion BCE loss.
        """
        super(BEVStereo4DOCC, self).__init__(**kwargs)
        self.out_dim = out_dim
        self.dataset_type = dataset_type
        self.loss_bce_weight = loss_bce_weight
        out_channels = out_dim if use_predicter else num_classes
        # 3D conv projecting the view-transformer output to `out_channels`.
        self.final_conv = ConvModule(
                        self.img_view_transformer.out_channels,
                        out_channels,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        bias=True,
                        conv_cfg=dict(type='Conv3d'))
        self.use_predicter =use_predicter
        if use_predicter:
            # self.predicter = nn.Sequential(
            #     nn.Linear(self.out_dim, self.out_dim*2),
            #     nn.Softplus(),
            #     nn.Linear(self.out_dim*2, num_classes),
            # )
            # NOTE(review): the MLP input width is num_classes, while
            # final_conv emits out_channels (= out_dim when use_predicter is
            # True). This only lines up if the fusion layer outputs
            # num_classes channels — confirm against the fusion config.
            self.predicter = nn.Sequential(
                nn.Linear(num_classes, num_classes*2),
                nn.Softplus(),
                nn.Linear(num_classes*2, num_classes),
            )
            # self.predicter = nn.Sequential(
            #     ME.MinkowskiLinear(self.out_dim, self.out_dim*2),
            #     MinkowskiSoftplus(),
            #     ME.MinkowskiLinear(self.out_dim*2, num_classes),
            # )
        # Disable the detection head inherited from BEVStereo4D.
        self.pts_bbox_head = None
        self.use_mask = use_mask
        self.num_classes = num_classes
        self.loss_occ = build_loss(loss_occ)
        self.class_wise = class_wise
        self.align_after_view_transfromation = False

        # Create x, y, z coordinate grids
        x = torch.arange(200)
        y = torch.arange(200)
        z = torch.arange(16)
        # Generate a coordinate grid for each dimension (200, 200, 16)
        mesh_x, mesh_y, mesh_z = torch.meshgrid(x, y, z, indexing='ij')
        # Flatten the coordinate grids and stack them to get (200*200*16, 3)
        self.COO_format_coords = torch.stack([mesh_x.flatten(), mesh_y.flatten(), mesh_z.flatten()], dim=1)


        self.lidar_backbone = builder.build_backbone(lidar_backbone)    
        self.lidar_neck = builder.build_neck(lidar_neck)
        self.fusion = builder.build_fusion_layer(fusion)
        # self.occ_backbone = builder.build_backbone(occ_backbone)
        # self.occ_neck = builder.build_neck(occ_neck)

    def loss_single(self,voxel_semantics,mask_camera,preds):
        """Compute the occupancy classification loss.

        Args:
            voxel_semantics: Integer voxel labels; flattened to (-1,) here.
            mask_camera: Camera-visibility mask; only consulted when
                ``self.use_mask`` is True.
            preds: Occupancy logits; reshaped to (-1, num_classes).

        Returns:
            dict: ``{'loss_occ': ...}``.
        """
        loss_ = dict()
        voxel_semantics=voxel_semantics.long()
        if self.use_mask:
            mask_camera = mask_camera.to(torch.int32)
            voxel_semantics=voxel_semantics.reshape(-1)
            preds=preds.reshape(-1,self.num_classes)
            mask_camera = mask_camera.reshape(-1)
            # Average over the number of visible voxels, not the grid size.
            num_total_samples=mask_camera.sum()
            loss_occ=self.loss_occ(preds,voxel_semantics,mask_camera, avg_factor=num_total_samples)
            loss_['loss_occ'] = loss_occ
        else:
            voxel_semantics = voxel_semantics.reshape(-1)
            preds = preds.reshape(-1, self.num_classes)
            loss_occ = self.loss_occ(preds, voxel_semantics,)
            loss_['loss_occ'] = loss_occ
        return loss_

    def simple_test(self,
                    points,
                    img_metas,
                    img=None,
                    rescale=False,
                    **kwargs):
        """Test function without augmentation.

        Runs the LiDAR sparse branch and the camera branch, fuses them, and
        returns the per-voxel semantic argmax as a uint8 grid.
        """
        # Reshape the ground truth into MinkowskiEngine-friendly form; the GT
        # coordinates seed the completion targets of the sparse neck.
        # NOTE(review): at test time voxel_semantics appears to arrive as a
        # list with an extra leading dim (indexed [0] / [b][0]), unlike
        # forward_train — confirm against the test data pipeline.
        voxel_semantics = kwargs['voxel_semantics'] # (b, 200, 200, 16)
        mask_camera = kwargs['mask_camera']
        if self.dataset_type == 'nuscenes':
            assert voxel_semantics[0].min() >= 0 and voxel_semantics[0].max() <= 17
        if self.dataset_type == 'waymo':
            assert voxel_semantics[0].min() >= 0 and voxel_semantics[0].max() <= 15#
        coo_list_gt = [] 
        semantics_list_gt = []
        for b in range(voxel_semantics[0].shape[0]):
            current_voxel_grid = voxel_semantics[b][0]
            # Occupied = anything that is not the dataset's free-space class.
            if self.dataset_type == 'nuscenes':
                mask = current_voxel_grid != 17
            if self.dataset_type == 'waymo':
                mask = current_voxel_grid != 15
            coords = torch.argwhere(mask) # (dense_points,3)
            coo_list_gt.append(coords)    
            all_feats = current_voxel_grid.view(-1, 1)  # (200*200*16, 1)
            all_coords = self.COO_format_coords.to(current_voxel_grid.device)
            all_coords_and_feats = torch.cat([all_coords, all_feats], dim=1)
            semantics_list_gt.append(all_coords_and_feats) # (200*200*16, 4)

        # Voxelize the raw point cloud and reorder coords to (batch, x, y, z).
        voxels, num_points, coors = self.voxelize(points)
        if self.dataset_type == 'nuscenes':
            coors[:, 3] = 200 - coors[:, 3]  # Reverse the y direction
            coors = coors[:, [0, 2, 3, 1]] # move b,z,x,y,to b, x, y,z
        if self.dataset_type == 'waymo':
            coors = coors[:, [0, 3, 2, 1]]
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)

        # sparse to dense lidar semantic segmentation
        pts_sparse_tensor = ME.SparseTensor(
            features = voxel_features, 
            coordinates = coors,
            device = voxel_features.device,
        )
        # Register GT coordinates with the coordinate manager so the neck can
        # emit predictions at the target locations.
        cm = pts_sparse_tensor.coordinate_manager
        target_key, _ = cm.insert_and_map(
            ME.utils.batched_coordinates(coo_list_gt).to(voxel_features.device),
            string_id = "target",
        )
        pts_feats = self.lidar_backbone(pts_sparse_tensor)
        _, _, pts_feat = self.lidar_neck(pts_feats, target_key)


        # convert pts_feat from list[SparseTensor] to [b, 200, 200, 16] grid format
        pts_coord, pts_feat = pts_feat.decomposed_coordinates_and_features
        batch_grids = []
        for each_coord, each_feat in zip(pts_coord, pts_feat):
            # Dense (C, 16, 200, 200) scatter target for this sample.
            grid = torch.zeros((pts_feat[0].shape[1], 16, 200, 200), dtype=each_feat.dtype, device=each_feat.device)
            x = each_coord[:, 0].long()
            y = each_coord[:, 1].long()
            z = each_coord[:, 2].long()
            # Drop generated coordinates that fall outside the grid bounds.
            mask = (x < 200) & (y < 200) & (z < 16)
            x, y, z = x[mask], y[mask], z[mask]
            each_feat = each_feat[mask]  # Filter features using the mask
            grid[:, z, y, x] = each_feat.t()
            batch_grids.append(grid)
        # Stack the grids along the batch dimension to get [batch, channels, z, y, x]
        occ_pred_lidar = torch.stack(batch_grids, dim=0)



        # Camera branch: dense voxel features from the view transformer.
        img_feats, _, _ = self.extract_feat(
            points, img=img, img_metas=img_metas, **kwargs)
        occ_pred_cam = self.final_conv(img_feats)# (b, c, z, y, x)
        # Fuse camera and LiDAR grids, then permute (bczyx -> bxyzc).
        occ_pred = self.fusion(occ_pred_cam, occ_pred_lidar).permute(0, 4, 3, 2, 1) # bncdhw->bnwhdc (bczyx -> bxyzc)

        if self.use_predicter:
            occ_pred = self.predicter(occ_pred)


        occ_score=occ_pred.softmax(-1)
        occ_res=occ_score.argmax(-1)
        occ_res = occ_res.squeeze(dim=0).cpu().numpy().astype(np.uint8)
        return [occ_res]

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function.

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
                Defaults to None.
            img_metas (list[dict], optional): Meta information of each sample.
                Defaults to None.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes. Defaults to None.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes. Defaults to None.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images. Defaults to None.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
                images. Defaults to None.
            img (torch.Tensor optional): Images of each sample with shape
                (N, C, H, W). Defaults to None.
            proposals ([list[torch.Tensor], optional): Predicted proposals
                used for training Fast RCNN. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored. Defaults to None.

        Returns:
            dict: Losses of different branches.
        """
        losses = dict()

        # Reshape the ground truth into MinkowskiEngine-friendly form; the GT
        # coordinates seed the completion targets of the sparse neck.
        voxel_semantics = kwargs['voxel_semantics'] # (b, 200, 200, 16)
        mask_camera = kwargs['mask_camera']
        if self.dataset_type == 'nuscenes':
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 17
        if self.dataset_type == 'waymo':
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 15#
        coo_list_gt = [] 
        semantics_list_gt = []
        for b in range(voxel_semantics.shape[0]):
            current_voxel_grid = voxel_semantics[b]
            # Occupied = anything that is not the dataset's free-space class.
            if self.dataset_type == 'nuscenes':
                mask = current_voxel_grid != 17
            if self.dataset_type == 'waymo':
                mask = current_voxel_grid != 15
            coords = torch.argwhere(mask) # (dense_points,3)
            coo_list_gt.append(coords)    
            all_feats = current_voxel_grid.view(-1, 1)  # (200*200*16, 1)
            all_coords = self.COO_format_coords.to(current_voxel_grid.device)
            all_coords_and_feats = torch.cat([all_coords, all_feats], dim=1)
            semantics_list_gt.append(all_coords_and_feats) # (200*200*16, 4)

        # voxelization of pointcloud; reorder coords to (batch, x, y, z)
        voxels, num_points, coors = self.voxelize(points)
        if self.dataset_type == 'nuscenes':
            coors[:, 3] = 200 - coors[:, 3]  # Reverse the y direction
            coors = coors[:, [0, 2, 3, 1]] # move b,z,x,y,to b, x, y,z
        if self.dataset_type == 'waymo':
            coors = coors[:, [0, 3, 2, 1]]
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)


        # sparse to dense lidar semantic segmentation
        pts_sparse_tensor = ME.SparseTensor(
            features = voxel_features, 
            coordinates = coors,
            device = voxel_features.device,
        )
        # Register GT coordinates with the coordinate manager so the neck can
        # emit predictions at the target locations.
        cm = pts_sparse_tensor.coordinate_manager
        target_key, _ = cm.insert_and_map(
            ME.utils.batched_coordinates(coo_list_gt).to(voxel_features.device),
            string_id = "target",
        )

        pts_feats = self.lidar_backbone(pts_sparse_tensor)
        out_cls, targets, pts_feat = self.lidar_neck(pts_feats, target_key)

        # bce loss calculation for scene completion point existence
        bce_loss = self.lidar_neck.get_bce_loss(out_cls, targets)   
        losses['loss_bce'] = bce_loss * self.loss_bce_weight

        # ce loss calculatiuon for lidar semantic segmentation 
        # ce_loss = self.lidar_neck.get_ce_loss(pts_feat, semantics_list_gt)
        # losses['loss_ce'] = ce_loss

        # convert pts_feat from list[SparseTensor] to [b, 200, 200, 16] grid format
        pts_coord, pts_feat = pts_feat.decomposed_coordinates_and_features
        batch_grids = []
        for each_coord, each_feat in zip(pts_coord, pts_feat):
            # Dense (C, 16, 200, 200) scatter target for this sample.
            grid = torch.zeros((pts_feat[0].shape[1], 16, 200, 200), dtype=each_feat.dtype, device=each_feat.device)
            x = each_coord[:, 0].long()
            y = each_coord[:, 1].long()
            z = each_coord[:, 2].long()
            # Drop generated coordinates that fall outside the grid bounds.
            mask = (x < 200) & (y < 200) & (z < 16)
            x, y, z = x[mask], y[mask], z[mask]
            each_feat = each_feat[mask]  # Filter features using the mask
            grid[:, z, y, x] = each_feat.t()
            batch_grids.append(grid)
        # Stack the grids along the batch dimension to get [batch, channels, z, y, x]
        occ_pred_lidar = torch.stack(batch_grids, dim=0)

        # camera lss 3d resnet semantic segmentation
        img_feats, _, depth = self.extract_feat(
        points, img=img_inputs, img_metas=img_metas, **kwargs)
        gt_depth = kwargs['gt_depth']
        loss_depth = self.img_view_transformer.get_depth_loss(gt_depth, depth)
        losses['loss_depth'] = loss_depth
        occ_pred_cam = self.final_conv(img_feats) # (bczyx)

        # fuse sparse lidar with dense camera features, output should be dense grid (b, 200, 200, 16)
        occ_pred = self.fusion(occ_pred_cam, occ_pred_lidar).permute(0, 4, 3, 2, 1) # bczyx -> bxyzc

        if self.use_predicter:
            occ_pred = self.predicter(occ_pred)


        loss_occ = self.loss_single(voxel_semantics, mask_camera, occ_pred)
        losses.update(loss_occ)
        return losses
    
@DETECTORS.register_module()
class BEVStereo4DOCC_MinkOcc(BEVStereo4D):
    """LiDAR-only sparse occupancy variant of BEVStereo4DOCC.

    Supervises a MinkowskiEngine completion/segmentation pipeline directly
    with a BCE loss (scene completion: does a voxel exist?) and a CE loss
    (per-voxel semantics); no camera fusion or dense occupancy grid is
    produced. Test-time inference is not implemented.
    """

    def __init__(self,
                 out_dim=18,
                 dataset_type='nuscenes',
                 use_mask=False,
                 num_classes=18,
                 use_predicter=True,
                 class_wise=False,
                 lidar_backbone=None,
                 lidar_neck=None,
                 loss_ce_weight=None,
                 loss_bce_weight=None,
                 **kwargs):
        """Build the sparse LiDAR occupancy model.

        Args:
            out_dim (int): Feature width fed to the Minkowski MLP predicter.
            dataset_type (str): 'nuscenes' or 'waymo'; selects the free-space
                class id (17 vs 15) and the LiDAR coordinate reordering.
            use_mask (bool): If True, camera-mask handling is applied to the
                ground-truth grid before supervision.
            num_classes (int): Number of semantic classes (incl. free space).
            use_predicter (bool): If True, append a Minkowski MLP classifier.
            class_wise (bool): Stored flag; unused in this file.
            lidar_backbone (dict): Sparse backbone config.
            lidar_neck (dict): Sparse neck config (also provides the losses).
            loss_ce_weight (float): Weight for the semantic CE loss.
            loss_bce_weight (float): Weight for the completion BCE loss.
        """
        super(BEVStereo4DOCC_MinkOcc, self).__init__(**kwargs)
        self.out_dim = out_dim
        self.dataset_type = dataset_type
        self.use_predicter = use_predicter
        if use_predicter:
            # Sparse MLP head mapping out_dim -> num_classes on sparse tensors.
            self.predicter = nn.Sequential(
                ME.MinkowskiLinear(self.out_dim, self.out_dim*2),
                MinkowskiSoftplus(),
                ME.MinkowskiLinear(self.out_dim*2, num_classes),
            )
        # Disable the detection head inherited from BEVStereo4D.
        self.pts_bbox_head = None
        self.use_mask = use_mask
        self.num_classes = num_classes
        self.class_wise = class_wise
        self.align_after_view_transfromation = False
        self.loss_ce_weight = loss_ce_weight
        self.loss_bce_weight = loss_bce_weight

        # Precompute dense COO coordinates of the (200, 200, 16) grid, used to
        # attach a ground-truth semantic label to every voxel.
        x = torch.arange(200)
        y = torch.arange(200)
        z = torch.arange(16)
        mesh_x, mesh_y, mesh_z = torch.meshgrid(x, y, z, indexing='ij')
        # (200*200*16, 3) integer coordinates in x, y, z order.
        self.COO_format_coords = torch.stack([mesh_x.flatten(), mesh_y.flatten(), mesh_z.flatten()], dim=1)

        # build lidar backbone and neck
        self.lidar_backbone = builder.build_backbone(lidar_backbone)
        self.lidar_neck = builder.build_neck(lidar_neck)

    def simple_test(self, *args, **kwargs):
        """Placeholder: test-time inference is not implemented.

        Fix: the original stub was declared without ``self`` and raised
        TypeError whenever invoked as a bound method by the test pipeline.
        """
        pass

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function.

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
                Defaults to None.
            img_metas (list[dict], optional): Meta information of each sample.
                Defaults to None.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes. Defaults to None.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes. Defaults to None.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images. Defaults to None.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
                images. Defaults to None.
            img (torch.Tensor optional): Images of each sample with shape
                (N, C, H, W). Defaults to None.
            proposals ([list[torch.Tensor], optional): Predicted proposals
                used for training Fast RCNN. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored. Defaults to None.

        Returns:
            dict: Losses of different branches ('loss_bce', 'loss_ce').
        """
        losses = dict()

        # Reshape the ground truth into MinkowskiEngine-friendly form.
        voxel_semantics = kwargs['voxel_semantics'] # (b, 200, 200, 16)
        mask_camera = kwargs['mask_camera'] # (b, 200, 200, 16)
        mask_camera = mask_camera.to(torch.bool)
        if self.dataset_type == 'nuscenes' and self.use_mask:
            # NOTE(review): this overwrites voxels where mask_camera is True
            # with the free-space label and mutates kwargs['voxel_semantics']
            # in place. If mask_camera marks camera-VISIBLE voxels, this
            # erases exactly the labels to be learned and should likely use
            # ~mask_camera — confirm the mask convention with the dataset.
            voxel_semantics[mask_camera] = 17
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 17
        if self.dataset_type == 'waymo' and self.use_mask:
            # Same caveat as above, with Waymo's free-space label 15.
            voxel_semantics[mask_camera] = 15
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 15

        # Per sample: sparse coordinates of occupied voxels (completion
        # targets) plus dense per-voxel semantic labels in COO format.
        coo_list_gt = []
        semantics_list_gt = []
        for b in range(voxel_semantics.shape[0]):
            current_voxel_grid = voxel_semantics[b]
            # Occupied = anything that is not the dataset's free-space class.
            if self.dataset_type == 'nuscenes':
                mask = current_voxel_grid != 17
            if self.dataset_type == 'waymo':
                mask = current_voxel_grid != 15
            coords = torch.argwhere(mask) # (dense_points, 3)
            coo_list_gt.append(coords)
            all_feats = current_voxel_grid.view(-1, 1)  # (200*200*16, 1)
            all_coords = self.COO_format_coords.to(current_voxel_grid.device)
            all_coords_and_feats = torch.cat([all_coords, all_feats], dim=1)
            semantics_list_gt.append(all_coords_and_feats) # (200*200*16, 4)

        # Voxelize the point cloud and reorder coords to (batch, x, y, z).
        voxels, num_points, coors = self.voxelize(points)
        if self.dataset_type == 'nuscenes':
            coors[:, 3] = 200 - coors[:, 3]  # Reverse the y direction
            coors = coors[:, [0, 2, 3, 1]] # move b,z,x,y to b,x,y,z
        if self.dataset_type == 'waymo':
            coors = coors[:, [0, 3, 2, 1]]
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)

        # Build the sparse input tensor and register the GT coordinates so
        # the neck can emit completion targets at matching locations.
        pts_sparse_tensor = ME.SparseTensor(
            features = voxel_features,
            coordinates = coors,
            device = voxel_features.device,
        )
        cm = pts_sparse_tensor.coordinate_manager
        target_key, _ = cm.insert_and_map(
            ME.utils.batched_coordinates(coo_list_gt).to(voxel_features.device),
            string_id = "target",
        )

        pts_feats = self.lidar_backbone(pts_sparse_tensor)
        out_cls, targets, pts_feat = self.lidar_neck(pts_feats, target_key)

        # BCE loss for scene-completion point existence.
        bce_loss = self.lidar_neck.get_bce_loss(out_cls, targets)
        losses['loss_bce'] = bce_loss * self.loss_bce_weight

        # CE loss for per-voxel LiDAR semantic segmentation.
        if self.use_predicter:
            pts_feat = self.predicter(pts_feat)
        ce_loss = self.lidar_neck.get_ce_loss(pts_feat, semantics_list_gt)
        losses['loss_ce'] = ce_loss * self.loss_ce_weight

        return losses
    
@DETECTORS.register_module()
class BEVStereo4DOCC_robotcycle(BEVStereo4D):
    """Work-in-progress occupancy variant for the robotcycle dataset.

    Only the LiDAR voxelization/encoding path of ``forward_train`` is wired
    up; no losses are produced yet, and loss computation and test-time
    inference are unimplemented placeholders.
    """

    def __init__(self,
                 loss_occ=None,
                 out_dim=18,
                 use_mask=False,
                 num_classes=18,
                 use_predicter=True,
                 class_wise=False,
                 lidar_backbone=None,
                 lidar_neck=None,
                 **kwargs):
        """Build the model.

        Args:
            loss_occ (dict): Config for the occupancy classification loss.
            out_dim (int): Feature width for the Minkowski MLP predicter.
            use_mask (bool): Stored flag; unused in this file.
            num_classes (int): Number of semantic classes.
            use_predicter (bool): If True, append a Minkowski MLP classifier.
            class_wise (bool): Stored flag; unused in this file.
            lidar_backbone (dict): Sparse backbone config.
            lidar_neck (dict): Sparse neck config.
        """
        super(BEVStereo4DOCC_robotcycle, self).__init__(**kwargs)
        self.out_dim = out_dim
        self.use_predicter = use_predicter
        if use_predicter:
            # Sparse MLP head mapping out_dim -> num_classes on sparse tensors.
            self.predicter = nn.Sequential(
                ME.MinkowskiLinear(self.out_dim, self.out_dim*2),
                MinkowskiSoftplus(),
                ME.MinkowskiLinear(self.out_dim*2, num_classes),
            )
        # Disable the detection head inherited from BEVStereo4D.
        self.pts_bbox_head = None
        self.use_mask = use_mask
        self.num_classes = num_classes
        self.loss_occ = build_loss(loss_occ)
        self.class_wise = class_wise
        self.align_after_view_transfromation = False

        # build lidar backbone and neck
        self.lidar_backbone = builder.build_backbone(lidar_backbone)
        self.lidar_neck = builder.build_neck(lidar_neck)

    def loss_single(self, *args, **kwargs):
        """Placeholder: loss computation is not implemented.

        Fix: the original stub was declared without ``self`` and raised
        TypeError whenever invoked as a bound method.
        """
        pass

    def simple_test(self, *args, **kwargs):
        """Placeholder: test-time inference is not implemented.

        Fix: the original stub was declared without ``self`` and raised
        TypeError whenever invoked as a bound method.
        """
        pass

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function (WIP: returns an empty loss dict).

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
                Defaults to None.
            img_metas (list[dict], optional): Meta information of each sample.
                Defaults to None.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes. Defaults to None.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes. Defaults to None.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images. Defaults to None.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
                images. Defaults to None.
            img (torch.Tensor optional): Images of each sample with shape
                (N, C, H, W). Defaults to None.
            proposals ([list[torch.Tensor], optional): Predicted proposals
                used for training Fast RCNN. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored. Defaults to None.

        Returns:
            dict: Currently empty; no supervision is attached yet.
        """
        losses = dict()

        # Voxelize the point cloud and encode per-voxel features. The
        # features are computed but not yet supervised — branch is WIP.
        voxels, num_points, coors = self.voxelize(points)
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)

        return losses

@DETECTORS.register_module()
class BEVStereo4DOCC_MinkOccV2(BEVDepth4D):
    def __init__(self, 
                 out_dim=18,
                 dataset_type='nuscenes',
                 use_mask=False,
                 num_classes=18,
                 use_predicter=True,
                 lidar_backbone = None,
                 lidar_neck = None,
                 sparse_fusion = None,
                 occ_backbone = None,
                 occ_neck = None,
                 stereo = True, 
                 loss_ce_weight = None,
                 loss_bce_weight = None,
                 loss_lidarseg_weight = None,
                 **kwargs):
        """Build the V2 sparse occupancy model.

        Args:
            out_dim (int): Feature width fed to the Minkowski MLP predicter.
            dataset_type (str): Dataset name ('nuscenes' or 'waymo').
            use_mask (bool): Whether camera-visibility masking is applied.
            num_classes (int): Number of semantic classes.
            use_predicter (bool): If True, append a Minkowski MLP classifier.
            lidar_backbone (dict): Sparse LiDAR backbone config.
            lidar_neck (dict): Sparse LiDAR neck config (provides losses).
            sparse_fusion (dict): Sparse camera-LiDAR fusion layer config.
            occ_backbone (dict): Sparse occupancy backbone config.
            occ_neck (dict): Sparse occupancy neck config.
            stereo (bool): Stored flag for stereo image encoding.
            loss_ce_weight (float): Weight for the semantic CE loss.
            loss_bce_weight (float): Weight for the completion BCE loss.
            loss_lidarseg_weight (float): Weight for the LiDAR seg loss.
        """
        super(BEVStereo4DOCC_MinkOccV2, self).__init__(**kwargs)
        self.stereo = stereo
        self.dataset_type = dataset_type
        self.use_mask = use_mask
        self.out_dim = out_dim

        # loss weights
        self.loss_ce_weight = loss_ce_weight
        self.loss_bce_weight = loss_bce_weight
        self.loss_lidarseg_weight = loss_lidarseg_weight

        self.use_predicter = use_predicter
        if use_predicter:
            # Sparse MLP head mapping out_dim -> num_classes on sparse tensors.
            self.predicter = nn.Sequential(
                ME.MinkowskiLinear(self.out_dim, self.out_dim*2),
                MinkowskiSoftplus(),
                ME.MinkowskiLinear(self.out_dim*2, num_classes),
            )
        # Create x, y, z coordinate grids
        x = torch.arange(200)
        y = torch.arange(200)
        z = torch.arange(16)
        # Generate a coordinate grid for each dimension (200, 200, 16)
        mesh_x, mesh_y, mesh_z = torch.meshgrid(x, y, z, indexing='ij')
        # Flatten the coordinate grids and stack them to get (200*200*16, 3)
        self.COO_format_coords = torch.stack([mesh_x.flatten(), mesh_y.flatten(), mesh_z.flatten()], dim=1)

        # build minkresunet
        self.lidar_backbone = builder.build_backbone(lidar_backbone)
        self.lidar_neck = builder.build_neck(lidar_neck)
        # build fusion 
        self.sparse_fusion = builder.build_fusion_layer(sparse_fusion)
        # build minkgenunet
        self.occ_backbone = builder.build_backbone(occ_backbone)
        self.occ_neck = builder.build_neck(occ_neck)

    def prepare_bev_feat(self, img):
        """Encode images, returning (image features, stereo feature map)."""
        return self.image_encoder(img, stereo=True)

    def extract_img_feat(self,
                         img,
                         img_metas,
                         pred_prev=False,
                         sequential=False,
                         **kwargs):
        """Extract image features without tracking gradients.

        Only the first element of ``img`` is encoded — presumably the
        current-frame views stacked as (batch, num_views, C, H, W); the
        stereo feature returned by the encoder is discarded.
        """
        current_views = img[0]
        with torch.no_grad():
            # Frozen image encoder: features only, stereo output dropped.
            encoded_feat, _stereo_feat = self.prepare_bev_feat(current_views)
        return encoded_feat
    
    def get_rgb_values(self, points_img, img):
        """Bilinearly sample image values at projected 2D point locations.

        Args:
            points_img: (N, 2) pixel coordinates (u, v).
            img: (C, H, W) image/feature map to sample from.

        Returns:
            tuple: (indices of the in-bounds points within ``points_img``,
            (M, C) sampled values for those points). Both are empty tensors
            when no point projects inside the image.
        """
        height, width = img.shape[1], img.shape[2]
        device = img.device

        # Map pixel coordinates into grid_sample's normalized [-1, 1] range.
        norm_u = 2 * (points_img[:, 0] / (width - 1)) - 1
        norm_v = 2 * (points_img[:, 1] / (height - 1)) - 1

        # Keep only points whose projection lands inside the image.
        inside = (norm_u >= -1) & (norm_u <= 1) & (norm_v >= -1) & (norm_v <= 1)
        indices = inside.nonzero(as_tuple=False).squeeze(-1)

        u_inside = norm_u[inside]
        v_inside = norm_v[inside]

        # Guard clause: nothing visible in this view.
        if u_inside.numel() == 0:
            empty_idx = torch.tensor([], dtype=torch.long, device=device)
            empty_val = torch.tensor([], dtype=torch.float32, device=device)
            return empty_idx, empty_val

        # grid_sample wants a (1, N, 1, 2) sampling grid.
        sample_grid = torch.stack((u_inside, v_inside), dim=-1).unsqueeze(0).unsqueeze(2)

        # Bilinear sampling; output is (1, C, N, 1).
        sampled = F.grid_sample(img.unsqueeze(0), sample_grid, align_corners=True, mode='bilinear', padding_mode='zeros')
        values = sampled.squeeze(0).squeeze(2).permute(1, 0)  # (N, C)

        return indices, values

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        losses = dict()
        # define gpu device 
        gpu = points[0].device
        # points = list[torch.Tensor], optional): Points of each sample with rgb intensity and indices (n, 8), (xyz, rgbintensityindices)
        # points_xyzuv = list[torch.Tensor], optional): Points of each sample with uv coordinates. (n, 15), (xyz, uv1, uv2, uv3, uv4, uv5)
        # lidarseg = list[torch.Tensor], optional): Points of each sample with semantic segmentation labels (n,)

        # Quantize lidar points and its corresponding image coordinates to sparse tensor to be processed by minkresunet 
        original_pointcloud = []
        lidar_coords = []
        lidar_feats = []
        uv_for_each_cam = []
        lidar_labels = []
        # TODO: merge the two sparse qunatize together, the behavior is not deterministic
        for each_points, each_points_xyzuv, each_lidarseg in zip(points, kwargs['points_xyzuv'], kwargs['lidarseg']):
            # add in lidarseg once ground truth lidarseg loading pipeline is ready
            epc = each_points[:, :3].cpu()
            epf = each_points[:, 3:]
            # find the no. of cols in epf
            num_cols = epf.shape[1]
            ep_xyzuv_f = each_points_xyzuv[:, 3:]
            # combine epf with ep_xyzuv_f
            epf = torch.cat([epf, ep_xyzuv_f], dim=1).cpu()
            # reshape each_lidarseg from (n,) to (n, 1)
            each_lidarseg = each_lidarseg.view(-1, 1).to(torch.int32).cpu()
            sp_epc, sp_epf, sp_lidarseg, unique_map = ME.utils.sparse_quantize(epc, epf, each_lidarseg, return_index=True, quantization_size=0.01)

            # split sp_epf into sp_epf and sp_epf_xyzuv based on num cols
            sp_epf_xyzuv = sp_epf[:, num_cols:]
            sp_epf = sp_epf[:, :num_cols]

            original_pointcloud.append(epc[unique_map].to(gpu))
            lidar_coords.append(sp_epc)
            lidar_feats.append(sp_epf)
            uv_for_each_cam.append(sp_epf_xyzuv.to(gpu))
            lidar_labels.append(sp_lidarseg.to(gpu))

        ME_lidar_coords, ME_lidar_feats = ME.utils.sparse_collate(
            lidar_coords, lidar_feats, dtype=torch.float32
        )
        ME_lidar_tensor = ME.SparseTensor(ME_lidar_feats, ME_lidar_coords, device=gpu)

        # lidar segmentation (minkresunet)
        pts_feat_list = self.lidar_backbone(ME_lidar_tensor)
        _, _, ME_pts_feat = self.lidar_neck(pts_feat_list)

        # lidar segmentation supervision 
        lidarseg_loss = self.lidar_neck.get_lidarseg_loss(ME_pts_feat,lidar_labels)
        losses['loss_lidarseg'] = lidarseg_loss * self.loss_lidarseg_weight

        # camera image extraction 
        img_src_height, img_src_width = img_inputs[0].shape[-2], img_inputs[0].shape[-1]
        img_feats = self.extract_img_feat(img=img_inputs, img_metas=img_metas, **kwargs)
        # print(img_feats.shape) # (batch, num_views, channels, height, width)
        # resize img_feats to original img_inputs height and width using nearest neighbor)
        img_feats_resized = F.interpolate(
            img_feats.view(-1, img_feats.shape[2], img_feats.shape[3], img_feats.shape[4]), 
            size=(img_src_height, img_src_width), 
            mode='nearest'
        )
        # Reshape back to (batch, num_views, channels, img_src_height, img_src_width)
        img_feats_resized = img_feats_resized.view(img_feats.shape[0], img_feats.shape[1], img_feats.shape[2], img_src_height, img_src_width)

        # Initialize an empty list to hold the unprojected image features for each batch
        points_img_feats_all_batches = []
        # Loop over each batch
        for each_batch in range(img_feats.shape[0]):
            # Pre-fetch UV and image features for all views in the batch to avoid repeated memory allocations
            uv_batch = uv_for_each_cam[each_batch]
            img_feats_batch = img_feats_resized[each_batch]
            num_points = uv_batch.shape[0]
            # Initialize feature accumulation tensors on device
            points_feat_sum = torch.zeros((num_points, img_feats.shape[2]), device=gpu)
            points_feat_count = torch.zeros(num_points, device=gpu)
            # Unproject image features to the point cloud across all camera views
            for each_camera_view in range(img_feats.shape[1]):
                uv = uv_batch[:, each_camera_view * 2 : each_camera_view * 2 + 2]
                img_feat = img_feats_batch[each_camera_view]
                # Use the helper function to get RGB feature values
                indices, feat_values = self.get_rgb_values(uv, img_feat)
                # Accumulate features only if there are valid points
                if indices.numel() > 0 and feat_values.numel() > 0:
                    points_feat_sum[indices] += feat_values
                    points_feat_count[indices] += 1
            # Avoid division by zero by clamping
            points_feat_avg = points_feat_sum / points_feat_count.unsqueeze(-1).clamp(min=1e-6)
            points_feat_avg[points_feat_count == 0] = 0
            # Concatenate lidar coordinates with their corresponding averaged image features
            points_img_feats_all_batches.append(points_feat_avg)

        # stack vertically the points_img_feats_all_batches
        points_img_feats = torch.cat(points_img_feats_all_batches, dim=0)
        # create img feats sparse tensor, following coordinates as lidar coordinates
        ME_img_feat = ME.SparseTensor(
            features = points_img_feats,
            coordinate_manager = ME_pts_feat.coordinate_manager,
            coordinate_map_key = ME_pts_feat.coordinate_map_key,
            device = gpu
        )
        # pts_feat (ME_pts_feat)
        # sparse fusion of lidar and camera features
        ME_fused_feat = self.sparse_fusion(ME_img_feat, ME_pts_feat)
        _, fused_feats = ME_fused_feat.decomposed_coordinates_and_features
        points = []
        for each_fused_feats, each_original_pointcloud in zip(fused_feats, original_pointcloud):
            points.append(torch.cat([each_original_pointcloud, each_fused_feats], dim=1))
        # points in list[torch.Tensor] (n, 19) -> (x,y,z 16 features)

        voxels, num_points, coors = self.voxelize(points)
        if self.dataset_type == 'nuscenes':
            coors[:, 3] = 200 - coors[:, 3]  # Reverse the y direction
            coors = coors[:, [0, 2, 3, 1]] # move b,z,x,y,to b, x, y,z
        if self.dataset_type == 'waymo':
            coors = coors[:, [0, 3, 2, 1]]
        voxel_features = self.pts_voxel_encoder(voxels, num_points, coors)

        # process ground truth 3D semantic occupancy labels 
        voxel_semantics = kwargs['voxel_semantics'] # (b, 200, 200, 16)
        mask_camera = kwargs['mask_camera'] # (b, 200, 200, 16)
        # convert mask_camera to torch.bool
        mask_camera = mask_camera.to(torch.bool)
        if self.dataset_type == 'nuscenes' and self.use_mask:
            # set mask camera hits to 17 in voxel semantics
            voxel_semantics[mask_camera] = 17
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 17
        if self.dataset_type == 'waymo' and self.use_mask:
            # set mask camera hits to 15 in voxel semantics
            voxel_semantics[mask_camera] = 15
            assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= 15
        coo_list_gt = [] 
        semantics_list_gt = []
        for b in range(voxel_semantics.shape[0]):
            current_voxel_grid = voxel_semantics[b]
            if self.dataset_type == 'nuscenes':
                mask = current_voxel_grid != 17
            if self.dataset_type == 'waymo':
                mask = current_voxel_grid != 15
            coords = torch.argwhere(mask) # (dense_points,3)
            coo_list_gt.append(coords)    
            all_feats = current_voxel_grid.view(-1, 1)  # (200*200*16, 1)
            all_coords = self.COO_format_coords.to(gpu)
            all_coords_and_feats = torch.cat([all_coords, all_feats], dim=1)
            semantics_list_gt.append(all_coords_and_feats) # (200*200*16, 4)

        # sparse to dense lidar semantic segmentation
        pts_sparse_tensor = ME.SparseTensor(
            features = voxel_features, 
            coordinates = coors,
            device = voxel_features.device,
        )
        cm = pts_sparse_tensor.coordinate_manager
        target_key, _ = cm.insert_and_map(
            ME.utils.batched_coordinates(coo_list_gt).to(voxel_features.device),
            string_id = "target",
        )

        # minkoccunet to process combined features 
        pts_feats = self.occ_backbone(pts_sparse_tensor)
        out_cls, targets, pts_feat = self.occ_neck(pts_feats, target_key)
        # bce loss
        bce_loss = self.lidar_neck.get_bce_loss(out_cls, targets)   
        losses['loss_bce'] = bce_loss * self.loss_bce_weight
        # ce loss
        if self.use_predicter:
            pts_feat = self.predicter(pts_feat)
        ce_loss = self.lidar_neck.get_ce_loss(pts_feat, semantics_list_gt)
        losses['loss_ce'] = ce_loss * self.loss_ce_weight

        return losses


@DETECTORS.register_module()
class BEVStereo4DOCC_Gaussian(BEVStereo4D):
    """BEVStereo4D variant that initializes 3D Gaussians from lidar points
    and multi-view camera features.

    The depth branch is supervised as in BEVStereo4D; the Gaussian head is
    built from the ``gaussianinit`` config and invoked in ``forward_train``,
    but its output is not yet supervised (work in progress).
    """

    def __init__(self,
                 dataset_type='nuscenes',
                 gaussianinit=None,
                 **kwargs):
        """Initialize the detector.

        Args:
            dataset_type (str): Dataset identifier, e.g. 'nuscenes' or
                'waymo'. Defaults to 'nuscenes'.
            gaussianinit (dict, optional): Config for the Gaussian
                initialization head, built via ``builder.build_head``.
        """
        super(BEVStereo4DOCC_Gaussian, self).__init__(**kwargs)
        self.dataset_type = dataset_type
        self.gaussianinit = builder.build_head(gaussianinit)

    def loss_single(self):
        # Not implemented yet; required by the base-class interface.
        pass

    def simple_test(self,
                    points,
                    img_metas,
                    img=None,
                    rescale=False,
                    **kwargs):
        """Test function without augmentation.

        Not implemented yet; returns None.
        """
        pass

    def forward_train(self,
                      points=None,
                      img_metas=None,
                      gt_bboxes_3d=None,
                      gt_labels_3d=None,
                      gt_labels=None,
                      gt_bboxes=None,
                      img_inputs=None,
                      proposals=None,
                      gt_bboxes_ignore=None,
                      **kwargs):
        """Forward training function.

        Args:
            points (list[torch.Tensor], optional): Points of each sample.
                Defaults to None.
                each point has shape (N, 8) -> (x, y, z, intensity, index, r, g, b)
            img_metas (list[dict], optional): Meta information of each sample.
                Defaults to None.
            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional):
                Ground truth 3D boxes. Defaults to None.
            gt_labels_3d (list[torch.Tensor], optional): Ground truth labels
                of 3D boxes. Defaults to None.
            gt_labels (list[torch.Tensor], optional): Ground truth labels
                of 2D boxes in images. Defaults to None.
            gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in
                images. Defaults to None.
            img (torch.Tensor optional): Images of each sample with shape
                (N, C, H, W). Defaults to None.
            proposals ([list[torch.Tensor], optional): Predicted proposals
                used for training Fast RCNN. Defaults to None.
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                2D boxes in images to be ignored. Defaults to None.

        Returns:
            dict: Losses of different branches.
        """
        losses = dict()
        # LSS view transformation: extract BEV image features and the
        # predicted depth distribution, then supervise depth.
        img_feats, _, depth = self.extract_feat(
            points, img=img_inputs, img_metas=img_metas, **kwargs)
        gt_depth = kwargs['gt_depth']
        loss_depth = self.img_view_transformer.get_depth_loss(gt_depth, depth)
        losses['loss_depth'] = loss_depth

        # Use lidar points and multi-view camera features to initialize
        # 3D Gaussians.
        # TODO: the Gaussian output is not yet supervised; add a loss term
        # once the rendering/occupancy supervision pipeline is ready.
        gaussians = self.gaussianinit(points, img_feats, **kwargs)

        # NOTE(review): removed a stray `pppp` token that was left here and
        # raised NameError on the first training step.
        return losses
    