import torch
import warnings
from torch import nn
from mmcv.ops import Voxelization
import torch.nn.functional as F
from mmcv.runner import force_fp32, BaseModule
from mmcv.cnn import build_norm_layer
from mmdet.models.builder import BACKBONES

from tools.deploy.ndarray_io.ndarray_io import load_ndarray


# from deploy.ndarray_io.ndarray_io import save_ndarray, load_ndarray

@BACKBONES.register_module()
class PillarNet(BaseModule):
    """PointPillars-style backbone: voxelize raw points and encode pillars to a BEV map.

    Combines a ``Voxelizer`` (hard voxelization via mmcv) with a
    ``PillarEncoder`` (PointNet-style feature extraction + scatter).

    Args:
        voxel_size (list[float]): Voxel size ``[vx, vy, vz]``.
        point_cloud_range (list[float]): ``[x_min, y_min, z_min, x_max, y_max, z_max]``.
        max_num_points (int): Max points kept per voxel.
        max_voxels (int | tuple[int]): Max number of voxels.
        out_channel (int): Output channels of the pillar encoder.
        pt_type (str): ``'lidar'`` or ``'radar'``; selects how many raw point
            channels are forwarded to the encoder.
        init_cfg (dict, optional): mmcv initialization config.
        norm_cfg (dict): Norm layer config forwarded to the encoder.
        dim_xy (bool): If True, the output canvas is (C, x, y) instead of (C, y, x).
        pretrained (str, optional): Deprecated; use ``init_cfg`` instead.
        output_pillar (bool): If True, the encoder returns raw pillar features
            as a dict instead of a dense canvas.
    """

    def __init__(self,
                 voxel_size,
                 point_cloud_range,
                 max_num_points,
                 max_voxels,
                 out_channel,
                 pt_type='lidar',
                 init_cfg=None,
                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
                 dim_xy=False,
                 pretrained=None,
                 output_pillar=False
                 ):
        super().__init__(init_cfg=init_cfg)
        self.pt_type = pt_type
        if self.pt_type == 'lidar':
            in_channel = 9
            self.num_features = 3  # use [x, y, z] for lidar
        elif self.pt_type == 'radar':
            in_channel = 11
            self.num_features = 6  # use [x, y, z, rcs, vx_comp, vy_comp] for radar
        else:
            # Bug fix: an unknown pt_type previously left ``in_channel`` unbound
            # and crashed below with an obscure NameError.
            raise ValueError(f"pt_type must be 'lidar' or 'radar', got {pt_type!r}")
        self.voxelizer = Voxelizer(voxel_size, point_cloud_range, max_num_points, max_voxels)
        self.pillarencoder = PillarEncoder(voxel_size, point_cloud_range, in_channel, out_channel,
                                           init_cfg, norm_cfg, dim_xy, pretrained, output_pillar)

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be setting at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is a deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        else:
            self.init_cfg = dict(type='Kaiming', layer='Conv1d')

    def forward(self, batched_pts):
        """Voxelize ``batched_pts`` and encode them into a BEV canvas.

        Args:
            batched_pts (list[Tensor]): Per-sample point clouds, each (n, c).

        Returns:
            Tensor: (bs, out_channel, y_l, x_l) BEV feature map, or a dict of
            pillar features when the encoder was built with output_pillar=True.
        """
        if len(batched_pts[0]) == 0:
            # Guard against an empty first sample: substitute a single all-zero
            # point so voxelization does not fail downstream.
            warnings.warn('len(batched_pts[0])==0')
            batched_pts[0] = torch.zeros(size=(1, batched_pts[0].shape[1])).to(batched_pts[0])
        pillars, npoints_per_pillar, coors_batch = self.voxelizer(batched_pts)
        # Only the first ``num_features`` raw point channels are fed to the encoder.
        batched_canvas = self.pillarencoder(pillars[:, :, 0:self.num_features], npoints_per_pillar, coors_batch)
        return batched_canvas

    def export_onnx(self, voxel_features, coords_idxs, voxel_mask):
        """Dense scatter variant used for ONNX-export experiments.

        Args:
            voxel_features: (ny*nx, 9, 32) densified per-cell point features.
            coords_idxs: (ny*nx, 4) cell coordinates.
            voxel_mask: (ny*nx,) 1 where a cell holds a real pillar, else 0.

        Returns:
            Tensor: (1, out_channel, y_l, x_l) BEV canvas.
        """
        voxel_features = self.pillarencoder.pfn_layer(voxel_features)
        canvas = torch.zeros(
            self.pillarencoder.x_l * self.pillarencoder.y_l,
            self.pillarencoder.out_channel,
            dtype=voxel_features.dtype,
            device=voxel_features.device)

        # Now scatter the blob back to the canvas.
        inds = coords_idxs.long()
        # NOTE(review): a (ny*nx,) mask does not broadcast against the
        # (ny*nx, C) features, and ``canvas[inds]`` with a 2-D index tensor
        # selects rows per coordinate column rather than per flat cell index —
        # confirm the intended shapes of coords_idxs/voxel_mask before relying
        # on this export path.
        voxel_features = voxel_features * voxel_mask
        canvas[inds] = voxel_features
        # Undo the column stacking to the final 4-dim tensor.
        canvas = canvas.view(1, self.pillarencoder.y_l, self.pillarencoder.x_l, self.pillarencoder.out_channel)
        canvas = canvas.permute(0, 3, 1, 2)
        return canvas

class Voxelizer(nn.Module):
    """Hard-voxelization wrapper that merges per-sample voxel outputs into one batch."""

    def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels):
        super().__init__()
        self.voxel_layer = Voxelization(voxel_size=voxel_size,
                                        point_cloud_range=point_cloud_range,
                                        max_num_points=max_num_points,
                                        max_voxels=max_voxels)

    @torch.no_grad()
    @force_fp32()
    def forward(self, batched_pts):
        """Voxelize each sample and concatenate the results across the batch.

        Args:
            batched_pts (list[Tensor]): Per-sample point clouds, each (n, c).

        Returns:
            tuple:
                pillars (Tensor): (p1 + ... + pb, num_points, c) voxelized points.
                npoints_per_pillar (Tensor): (p1 + ... + pb,) real point count per pillar.
                coors_batch (Tensor): (p1 + ... + pb, 1 + 3) pillar coordinates with
                    the sample index prepended as column 0.
        """
        voxel_list, coord_list, count_list = [], [], []
        for pts in batched_pts:
            # Input points (n, c); output: valid pillars (p, m, c), their grid
            # coordinates (p, 3), and the real point count per pillar (p,).
            voxels, coords, counts = self.voxel_layer(pts)
            voxel_list.append(voxels)
            coord_list.append(coords.long())
            count_list.append(counts)

        # Merge all samples along the pillar dimension.
        pillars = torch.cat(voxel_list, dim=0)
        npoints_per_pillar = torch.cat(count_list, dim=0)
        # Record the sample index as column 0 of each pillar coordinate row.
        coors_batch = torch.cat(
            [F.pad(coords, (1, 0), value=sample_idx)
             for sample_idx, coords in enumerate(coord_list)],
            dim=0)
        return pillars, npoints_per_pillar, coors_batch


class PFNLayer(BaseModule):
    """Pillar Feature Net layer: shared 1x1 Conv1d + BN + ReLU, then a max-pool
    over the point dimension.

    Input is (num_pillars, in_channel, num_points); output is
    (num_pillars, out_channel).
    """

    def __init__(self,
                 in_channel,
                 out_channel,
                 init_cfg=None,
                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
                 pretrained=None,
                 dump_tensor_dir=None
                 ):
        super(PFNLayer, self).__init__(init_cfg=init_cfg)
        self.in_channel = in_channel
        self.out_channel = out_channel
        # NOTE(review): norm_cfg and pretrained are accepted but never used;
        # the BN below runs with torch defaults (eps=1e-5, momentum=0.1), not
        # the eps/momentum from norm_cfg — confirm this is intentional.
        self.conv = nn.Conv1d(in_channel, out_channel, 1, bias=False)
        self.bn = nn.BatchNorm1d(num_features=out_channel)
        self.act = nn.ReLU()
        self.dump_tensor_dir = dump_tensor_dir
        # Hook bookkeeping; populated by register_hooks(). Initializing here
        # fixes an AttributeError when remove_hooks() runs before register_hooks().
        self.load_tensor_dir = None
        self.save_handle = None
        self.load_handle = None

    def forward(self, features):
        """Apply conv-bn-relu and max-pool over points.

        Args:
            features (Tensor): (num_pillars, in_channel, num_points).

        Returns:
            Tensor: (num_pillars, out_channel) pooled pillar features.
        """
        features = self.act(self.bn(self.conv(features)))  # (p1 + ... + pb, out_channel, num_points)
        pooling_features = torch.max(features, dim=-1)[0]  # (p1 + ... + pb, out_channel)
        return pooling_features

    def register_hooks(self, dump_tensor_dir=None, load_tensor_dir=None):
        """Attach debug hooks to this layer for tensor dumping / loading."""
        self.dump_tensor_dir = dump_tensor_dir
        self.load_tensor_dir = load_tensor_dir
        if self.dump_tensor_dir is not None:
            self.save_handle = self.register_forward_hook(self.save_hook)
        if self.load_tensor_dir is not None:
            self.load_handle = self.register_forward_pre_hook(self.load_input_hook)

    def remove_hooks(self):
        """Detach every hook attached by register_hooks().

        Bug fix: the original only removed the save hook — the load pre-hook
        leaked — and it raised AttributeError when called before register_hooks().
        """
        if getattr(self, 'save_handle', None) is not None:
            self.save_handle.remove()
            self.save_handle = None
        if getattr(self, 'load_handle', None) is not None:
            self.load_handle.remove()
            self.load_handle = None

    def save_hook(self, module, input, output):
        # Dumping is disabled; kept as a hook target for deploy debugging.
        pass

    def load_input_hook(self, module, input):
        # Loading is disabled; kept as a hook target for deploy debugging.
        pass

class PillarEncoder(BaseModule):
    """Encode per-pillar point features and scatter them onto a dense BEV canvas.

    Each pillar's points are augmented with offsets to the pillar's point
    centroid and to the pillar's geometric cell center, run through a PFN
    layer (shared MLP + max-pool), and finally scattered back to a
    (bs, out_channel, y_l, x_l) grid.
    """

    def __init__(self,
                 voxel_size,
                 point_cloud_range,
                 in_channel,
                 out_channel,
                 init_cfg=None,
                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
                 dim_xy=False,
                 pretrained=None,
                 output_pillar=False,
                 ):
        super().__init__()
        self.in_channel = in_channel  # per-point feature count after augmentation (original note said 8/10; PillarNet passes 9/11)
        self.out_channel = out_channel
        self.vx, self.vy, self.vz = voxel_size[0], voxel_size[1], voxel_size[2]
        # World-coordinate center of the (0, 0, 0) grid cell.
        self.x_offset = voxel_size[0] / 2 + point_cloud_range[0]
        self.y_offset = voxel_size[1] / 2 + point_cloud_range[1]
        self.z_offset = voxel_size[2] / 2 + point_cloud_range[2]
        # Grid resolution along x / y.
        self.x_l = int((point_cloud_range[3] - point_cloud_range[0]) / voxel_size[0])
        self.y_l = int((point_cloud_range[4] - point_cloud_range[1]) / voxel_size[1])
        self.dim_xy = dim_xy
        self.pfn_layer = PFNLayer(in_channel=self.in_channel,
                                  out_channel=out_channel,
                                  norm_cfg=norm_cfg)
        self.output_pillar = output_pillar

    def forward(self, pillars, npoints_per_pillar, coors_batch):
        '''
        pillars: (p1 + p2 + ... + pb, num_points, c) — c raw point channels
            (PillarNet passes num_features columns; 3 for lidar)
        npoints_per_pillar: (p1 + p2 + ... + pb, )
        coors_batch: (p1 + p2 + ... + pb, 1 + 3) — columns are (batch, z, y, x)
        return:  (bs, out_channel, y_l, x_l), or a dict of pillar features when
            self.output_pillar is True
        '''
        device = pillars.device
        # convert bzyx to bxyz
        # coors_batch = coors_batch[:, [0, 3, 2, 1]]
        # 1. calculate offset to the points center (in each pillar):
        #    point position (x, y, z) minus the pillar's point centroid
        points_mean = (
                pillars[:, :, :3].sum(dim=1, keepdim=True) /
                npoints_per_pillar.type_as(pillars).view(-1, 1, 1))
        offset_pt_center = pillars[:, :, :3] - points_mean  # (p1 + p2 + ... + pb, num_points, 3)

        # 2. calculate offset to the pillar center:
        #    point position (x, y) minus the pillar grid-cell center
        x_offset_pi_center = pillars[:, :, 0:1] - (
                coors_batch[:, None, 3:4] * self.vx + self.x_offset)  # (p1 + p2 + ... + pb, num_points, 1)
        y_offset_pi_center = pillars[:, :, 1:2] - (
                coors_batch[:, None, 2:3] * self.vy + self.y_offset)  # (p1 + p2 + ... + pb, num_points, 1)
        z_offset_pi_center = pillars[:, :, 2:3] - (
                coors_batch[:, None, 1:2] * self.vz + self.z_offset)  # (p1 + p2 + ... + pb, num_points, 1)

        # 3. encoder
        # [x, y, z, adx, ady, adz, cdx, cdy, cdz]
        features = torch.cat([pillars[:, :, :],
                              offset_pt_center,
                              x_offset_pi_center,
                              y_offset_pi_center,
                              z_offset_pi_center],
                             dim=-1)  # (p1 + p2 + ... + pb, num_points, c + 3 + 3)

        # Overwrite the first two channels: [x, y, ...] -> [cdx, cdy, ...]
        features[:, :, 0:1] = x_offset_pi_center  # tmp
        features[:, :, 1:2] = y_offset_pi_center  # tmp
        # Consistent with mmdet3d.
        # The reason can be referenced to https://github.com/open-mmlab/mmdetection3d/issues/1150

        # 4. find mask for (0, 0, 0) and update the encoded features:
        #    keep only each pillar's real points, zero out the padded slots
        # a very beautiful implementation
        voxel_ids = torch.arange(0, pillars.size(1)).to(device)  # (num_points, )
        mask = voxel_ids[:, None] < npoints_per_pillar[None, :]  # (num_points, p1 + p2 + ... + pb)
        mask = mask.permute(1, 0).contiguous()  # (p1 + p2 + ... + pb, num_points) marks which point slots are occupied
        features *= mask[:, :, None]

        # 5. embedding — PointNet-style: the pillar count acts as the batch
        #    dimension, so it may vary between frames
        features = features.permute(0, 2, 1).contiguous()  # (p1 + p2 + ... + pb, 9, num_points)

        # frame = str(0)
        # print(f"Read idx {frame} test pillar features: ")
        # from tools.deploy.ndarray_io.ndarray_io import load_ndarray, get_cosine_similarity
        # dummy_pillar = load_ndarray(f'/home/adt/test/tensors/cplus/PrepLidar_input_pillars{frame}.npy')
        # lens = 100000
        # print(get_cosine_similarity(dummy_pillar.reshape(-1)[:lens],
        #                             features.detach().cpu().numpy().reshape(-1)[:lens]))

        pooling_features = self.pfn_layer(features).to(torch.float32)

        if self.output_pillar:
            batch_dict = dict()
            batch_dict['pillar_features'] = batch_dict['voxel_features'] = pooling_features
            batch_dict['voxel_coords'] = coors_batch
            return batch_dict

        # 6. pillar scatter
        batched_canvas = []
        bs = coors_batch[-1, 0] + 1
        for i in range(bs):
            cur_coors_idx = coors_batch[:, 0] == i
            cur_coors = coors_batch[cur_coors_idx, :]  # pillar coordinates of sample i
            cur_features = pooling_features[cur_coors_idx]  # pillar features of sample i

            canvas = torch.zeros((self.x_l, self.y_l, self.out_channel), dtype=torch.float32, device=device)
            canvas[cur_coors[:, 3], cur_coors[:, 2]] = cur_features  # scatter pillar features into the map by grid index
            if self.dim_xy:
                canvas = canvas.permute(2, 0, 1).contiguous()  # now (c, x, y)
            else:
                canvas = canvas.permute(2, 1, 0).contiguous()  # now (c, y, x), i.e. chw
            batched_canvas.append(canvas)
        batched_canvas = torch.stack(batched_canvas, dim=0)  # (bs, out_channel, y_l, x_l) — stack the per-sample list into a batch
        return batched_canvas
