# Convert sparse or dense 3D tensors into BEV 2D tensors by dimension rearrangement

import torch.nn as nn
import torch

class ConcatFusion(nn.Module):
    """Fuse a dense BEV feature map with pillar-scattered lidar BEV features.

    If sparse pillar features are present, they are scattered into a dense
    BEV grid (PointPillarScatter3d style) and concatenated channel-wise with
    the existing ``spatial_features`` map.
    """

    def __init__(self, model_cfg, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        # Total BEV channels after the z-dimension is folded into channels.
        self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES
        # Controls where the output stride comes from in forward().
        self.sparse_input = getattr(self.model_cfg, 'SPARSE_INPUT', True)
        self.nx, self.ny, self.nz = self.model_cfg.INPUT_SHAPE
        # Channels per z-slice before the z-axis is collapsed into channels.
        self.num_bev_features_before_compression = self.model_cfg.NUM_BEV_FEATURES // self.nz

    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                pillar_features: (P, C) features of non-empty pillars (used only
                    when 'voxel_coords' is present)
                voxel_coords: (P, 4) [batch_idx, z_idx, y_idx, x_idx] per pillar
                spatial_features: (B, C', ny, nx) dense BEV features to fuse with
                batch_size: optional int; preferred source of the batch size
                encoded_spconv_tensor_stride: required when SPARSE_INPUT is True
        Returns:
            batch_dict:
                spatial_features: (B, C' + C_lidar, ny, nx) fused BEV features
                spatial_features_lidar: (B, C_lidar, ny, nx) scattered lidar map
                spatial_features_stride: stride of the fused feature map
        """
        if 'voxel_coords' in batch_dict:
            ## for DSVT pillar-based model
            """PointPillarScatter3d"""
            pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords']

            # Prefer the explicit batch size: deriving it from the max batch
            # index under-counts when trailing samples contain no pillars.
            batch_size = batch_dict.get('batch_size', None)
            if batch_size is None:
                batch_size = coords[:, 0].max().int().item() + 1

            batch_spatial_features_lidar = []
            for batch_idx in range(batch_size):
                spatial_feature = torch.zeros(
                    self.num_bev_features_before_compression,
                    self.nz * self.nx * self.ny,
                    dtype=pillar_features.dtype,
                    device=pillar_features.device)

                batch_mask = coords[:, 0] == batch_idx
                this_coords = coords[batch_mask, :]
                # Flatten (z, y, x) into a linear index over the BEV grid.
                indices = this_coords[:, 1] * self.ny * self.nx + this_coords[:, 2] * self.nx + this_coords[:, 3]
                indices = indices.long()
                pillars = pillar_features[batch_mask, :]
                # (P_b, C) -> (C, P_b): each pillar fills one grid column.
                pillars = pillars.t()
                spatial_feature[:, indices] = pillars
                batch_spatial_features_lidar.append(spatial_feature)

            batch_spatial_features_lidar = torch.stack(batch_spatial_features_lidar, 0)
            # Fold the z-slices into the channel dimension: (B, C*nz, ny, nx).
            batch_spatial_features_lidar = batch_spatial_features_lidar.view(
                batch_size, self.num_bev_features_before_compression * self.nz, self.ny, self.nx)
            batch_dict['spatial_features_lidar'] = batch_spatial_features_lidar

        assert 'spatial_features' in batch_dict

        # Channel-wise fusion of the dense BEV map with the lidar BEV map.
        batch_dict['spatial_features'] = torch.cat(
            [batch_dict['spatial_features'], batch_dict['spatial_features_lidar']], dim=1)
        if self.sparse_input:
            # Inherit the stride produced by the sparse conv backbone.
            batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride']
        else:
            batch_dict['spatial_features_stride'] = 1

        return batch_dict
