import torch
import torch.nn as nn
from torch.functional import Tensor

class lolo_vfe(nn.Module):
    """Scatter sparse pillar features into a dense BEV pseudo-image.

    Each pillar carries ``SHAPE_SIZE[0] * SHAPE_SIZE[1] * pointfeature``
    channels, interpreted as a ``SHAPE_SIZE[0] x SHAPE_SIZE[1]`` patch of
    ``pointfeature``-dim vectors.  After scattering onto the (ny, nx) grid,
    the patch cells are interleaved with the grid so the output is an
    upscaled map of shape
    ``(batch, pointfeature, ny * SHAPE_SIZE[0], nx * SHAPE_SIZE[1])``.
    """

    def __init__(self,
                 grid_size,
                 pointfeature=4,
                 shapesize=(4, 4), **kwargs):
        """
        Args:
            grid_size: (nx, ny, nz) voxel grid extents; nz must be 1.
            pointfeature: number of feature channels per patch cell.
            shapesize: (h, w) patch layout each pillar expands into.

        Raises:
            ValueError: if ``nz != 1``.
        """
        super().__init__()
        self.nx, self.ny, self.nz = grid_size
        self.pointfeature = pointfeature
        self.SHAPE_SIZE = shapesize
        # Channels per pillar: one `pointfeature` vector for each patch cell.
        self.num_bev_features1 = self.SHAPE_SIZE[0] * self.SHAPE_SIZE[1] * self.pointfeature
        if self.nz != 1:
            # `assert` is stripped under `python -O`; validate explicitly.
            raise ValueError(f'lolo_vfe requires nz == 1, got nz={self.nz}')

    def forward_default(self, input, **kwargs):
        """Scatter sparse pillars to a dense BEV grid and rearrange.

        Args:
            input: tuple ``(pillar_features, coords)`` where
                ``pillar_features`` is (P, num_bev_features1) and ``coords``
                is (P, 4) integer voxel coordinates.
                NOTE(review): the index arithmetic below implies the coord
                columns are (batch_idx, z, y, x) — confirm against the caller.

        Returns:
            Tensor of shape
            (batch, pointfeature, ny * SHAPE_SIZE[0], nx * SHAPE_SIZE[1]).
        """
        pillar_features, coords = input
        batch_spatial_features = []
        # Batch size is inferred from the largest batch index present.
        batch_size = coords[:, 0].max().int().item() + 1
        for batch_idx in range(batch_size):
            # Dense (C, nz*nx*ny) canvas; empty cells stay zero.
            spatial_feature = torch.zeros(
                self.num_bev_features1,
                self.nz * self.nx * self.ny,
                dtype=pillar_features.dtype,
                device=pillar_features.device)

            batch_mask = coords[:, 0] == batch_idx
            this_coords = coords[batch_mask, :]
            # Flatten (z, y, x) to a linear BEV index (z contributes 0: nz == 1).
            indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3]
            indices = indices.type(torch.long)
            # (P_b, C) -> (C, P_b) so columns line up with the canvas layout.
            pillars = pillar_features[batch_mask, :].t()
            spatial_feature[:, indices] = pillars
            batch_spatial_features.append(spatial_feature)

        batch_spatial_features = torch.stack(batch_spatial_features, 0)
        # Split the channel dim into its (patch_h, patch_w, pointfeature)
        # structure over the (ny, nx) grid.  (The original intermediate
        # view to (B, C*nz, ny, nx) was redundant since nz == 1.)
        batch_spatial_features = batch_spatial_features.view(
            batch_size, self.SHAPE_SIZE[0], self.SHAPE_SIZE[1],
            self.pointfeature, self.ny, self.nx)
        # Interleave patch cells with the spatial grid:
        # (B, ph, pw, pf, ny, nx) -> (B, pf, ny, ph, nx, pw).
        batch_spatial_features = batch_spatial_features.permute([0, 3, 4, 1, 5, 2]).contiguous()
        batch_spatial_features = batch_spatial_features.view(
            batch_size, self.pointfeature,
            self.ny * self.SHAPE_SIZE[0], self.nx * self.SHAPE_SIZE[1])
        return batch_spatial_features.contiguous()

    def forward_trans(self, input):
        """Pass-through for inputs that are already dense tensors."""
        return input

    def forward(self, input, **kwargs):
        """Dispatch on input type: (pillars, coords) tuple vs. dense tensor."""
        if isinstance(input, (tuple, list)):
            # Bug fix: the original passed `kwargs=kwargs`, which nested the
            # caller's kwargs under a single 'kwargs' key instead of
            # forwarding them.
            return self.forward_default(input, **kwargs)
        if isinstance(input, Tensor):
            return self.forward_trans(input)
        raise TypeError('unsupported type of input')
