# Copyright (c) OpenMMLab. All rights reserved.
import copy

import torch
from mmcv.cnn import ConvModule, build_conv_layer
from mmcv.runner import BaseModule
from torch import nn
import numpy as np
from mmdet3d.core import (circle_nms, draw_heatmap_gaussian, gaussian_radius,
                          xywhr2xyxyr)
from mmdet3d.core.post_processing import nms_bev
from mmdet3d.models import builder
from mmdet3d.models.utils import clip_sigmoid
from mmdet.core import build_bbox_coder, multi_apply, reduce_mean
from ..builder import HEADS, build_loss
from .centerpoint_head import CenterHead
from mmdet3d.ops.voxel_interpolation import voxel_interpolation_cpu as VI


@HEADS.register_module()
class BevMapHead(CenterHead):
    """CenterHead for CenterPoint.

    Args:
        in_channels (list[int] | int, optional): Channels of the input
            feature map. Default: [128].
        tasks (list[dict], optional): Task information including class number
            and class names. Default: None.
        train_cfg (dict, optional): Train-time configs. Default: None.
        test_cfg (dict, optional): Test-time configs. Default: None.
        map_coder (dict, optional): Map coder configs, forwarded to the
            parent class as ``bbox_coder``. Default: None.
        common_heads (dict, optional): Conv information for common heads.
            Default: dict().
        loss_cls (dict, optional): Config of classification loss function.
            Default: dict(type='GaussianFocalLoss', reduction='mean').
        loss_map (dict, optional): Config of map regression loss function.
            Default: dict(type='L1Loss', reduction='none', loss_weight=0.25).
        separate_head (dict, optional): Config of separate head. Default: dict(
            type='SeparateHead', init_bias=-2.19, final_kernel=3)
        share_conv_channel (int, optional): Output channels for share_conv
            layer. Default: 64.
        num_heatmap_convs (int, optional): Number of conv layers for heatmap
            conv layer. Default: 2.
        conv_cfg (dict, optional): Config of conv layer.
            Default: dict(type='Conv2d')
        norm_cfg (dict, optional): Config of norm layer.
            Default: dict(type='BN2d').
        bias (str, optional): Type of bias. Default: 'auto'.
    """

    def __init__(self,
                 in_channels=[128],
                 tasks=None,
                 train_cfg=None,
                 test_cfg=None,
                 map_coder=None,
                 common_heads=dict(),
                 loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),
                 loss_map=dict(
                     type='L1Loss', reduction='none', loss_weight=0.25),
                 separate_head=dict(
                     type='SeparateHead', init_bias=-2.19, final_kernel=3),
                 share_conv_channel=64,
                 num_heatmap_convs=2,
                 conv_cfg=dict(type='Conv2d'),
                 norm_cfg=dict(type='BN2d'),
                 bias='auto',
                 norm_map=True,
                 init_cfg=None,
                 task_specific=True):
        """Initialize the BEV map head.

        See the class docstring for argument descriptions. ``map_coder`` is
        forwarded to the parent class as ``bbox_coder``; ``loss_cls`` and
        ``loss_map`` are (re)built here, overriding whatever the parent set.
        """
        assert init_cfg is None, 'To prevent abnormal initialization ' \
            'behavior, init_cfg is not allowed to be set'
        super(BevMapHead, self).__init__(
            in_channels=in_channels,
            tasks=tasks,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            bbox_coder=map_coder,
            common_heads=common_heads,
            separate_head=separate_head,
            share_conv_channel=share_conv_channel,
            num_heatmap_convs=num_heatmap_convs,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            bias=bias,
            init_cfg=init_cfg)

        self.norm_map = norm_map

        self.loss_cls = build_loss(loss_cls)
        self.loss_map = build_loss(loss_map)
        # Bugfix: the original guard read `self.train`, which is
        # nn.Module.train — a bound method, hence always truthy. The intended
        # flag is `self.training` (True by default during __init__, so the
        # effective behavior at construction time is unchanged).
        if self.training and self.train_cfg is not None:
            self.init_markv(self.train_cfg)
            self.distance_aux = train_cfg.get('distance_aux', False)
            self.dis_markv = train_cfg.get('distance_markv', False)
            # Distance-weighted markv only applies when the aux loss is on.
            self.dis_markv *= self.distance_aux
            self.aux_l1_beta = train_cfg.get('aux_l1_beta', 0.0)
            self.reg_std = train_cfg.get('reg_std', False)
            self.pcr = copy.deepcopy(train_cfg['point_cloud_range'])
            self.grid_size = copy.deepcopy(train_cfg['grid_size'])
            self.map_layer = train_cfg.get('map_assign_layer', 0)
    def init_markv(self, train_cfg):
        """Precompute per-cell linear rescaling coefficients for the head.

        Builds a distance-dependent upper bound over the BEV feature map
        (cells nearer the origin approach ``up``, the farthest approach
        ``upl``) and stores ``k_markv`` / ``b_markv`` such that
        ``k * p + b`` linearly maps the interval [lp, upper(cell)] to [0, 1].

        Args:
            train_cfg (dict): Must contain 'grid_size', 'voxel_size',
                'out_size_factor' and 'point_cloud_range'; optional keys
                'up' (default 0.9), 'lp' (0.1) and 'upl' (0.9).
        """
        up = train_cfg.get('up', 0.9)
        lp = train_cfg.get('lp', 0.1)
        upl = train_cfg.get('upl', 0.9)
        grid_size = torch.tensor(train_cfg['grid_size'])
        # Voxel size at the head's output stride.
        vs = np.array(train_cfg['voxel_size']) * train_cfg['out_size_factor']
        feature_map_size = grid_size[:2] // train_cfg['out_size_factor']
        pcr = train_cfg['point_cloud_range']

        xs = torch.arange(0, feature_map_size[1], 1, dtype=torch.float32).cuda()
        ys = torch.arange(0, feature_map_size[0], 1, dtype=torch.float32).cuda()

        # Shift cell indices so the coordinate origin lands at cell (0, 0).
        # NOTE(review): xs uses vs[1] while ys uses vs[0] — the axis pairing
        # looks swapped relative to the arange sizes above; confirm intended.
        xs = xs + pcr[0] / vs[1]
        ys = ys + pcr[1] / vs[0]
        xs, ys = torch.meshgrid([xs, ys])  # [x_grid, y_grid]
        xys = torch.stack((ys, xs), -1)
        # Per-cell distance to the origin, normalized and inverted:
        # 1 at the origin cell, 0 at the farthest cell.
        xys = xys.norm(dim=2, p=2).unsqueeze(0)
        xys = 1 - (xys / xys.max())
        up = xys * (up - upl) + upl
        self.k_markv = 1 / (up - lp)
        self.b_markv = -lp / (up - lp)
        
    def forward_pts_train(self,
                          pts_feats,
                          gt_maps_3d,
                          img_metas,
                          gt_bboxes_ignore=None):
        """Run the head on point-cloud features and compute training losses.

        Args:
            pts_feats (list[torch.Tensor]): Features of the point cloud
                branch.
            gt_maps_3d (list): Ground-truth map annotations for each sample.
            img_metas (list[dict]): Meta information of samples (unused
                here, kept for interface compatibility).
            gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth
                boxes to be ignored. Defaults to None.

        Returns:
            dict: Losses of each branch.
        """
        predictions = self.forward(pts_feats)
        return self.loss(gt_maps_3d, predictions)

    def simple_test_maps(self, x, img_metas, rescale=False):
        """Test function of the map branch.

        Args:
            x (list[torch.Tensor]): Input features.
            img_metas (list[dict]): Meta information of samples.
            rescale (bool): Whether to rescale the decoded maps.
                Defaults to False.

        Returns:
            The decoded map results from ``get_maps``.
        """
        # Removed: an `if 0:` debug block that dumped raw head outputs to
        # ./tmp/tmp/*.bin, plus commented-out bbox3d2result leftovers.
        outs = self.forward(x)
        return self.get_maps(outs, img_metas, rescale=rescale)

    # def get_turn_type(self, turn: torch.Tensor):
    #     cls_turn = torch.new_zeros(turn.shape[0], 6)

    def get_targets(self, gt_maps_3d, preds_dicts):
        """Build dense map training targets and assign them to predictions.

        Each sample's ground-truth map is rasterized into dense per-voxel
        line points (``get_targets_single``), concatenated across the batch
        (batch index padded as column 0), and matched against the detached
        predictions so that every GT point is bound to one of the
        ``preds_per_voxel`` slots of its voxel. The assigned points are then
        scattered into per-task one-hot occupancy maps and regression
        targets.

        Args:
            gt_maps_3d (list): Per-sample ground-truth map annotations, each
                consumed by ``get_targets_single``.
                NOTE(review): downstream code reads the produced rows as
                [batch, line_id, pos, x, y, z, dx, dy, type] — confirm
                against ``get_targets_single``.
            preds_dicts (list): Per-task prediction dicts holding at least
                'heatmap' and 'reg' tensors; used only to steer the
                assignment (gradients are detached).

        Returns:
            tuple:
                - list[torch.Tensor]: Per-task one-hot occupancy maps.
                - list[torch.Tensor]: Per-task (B, C, H, W) indices of the
                  assigned points.
                - list[torch.Tensor]: Per-task regression targets.
                - list[torch.Tensor]: Per-task one-hot double-line masks.
                - list: Per-task line-type maps ([] when disabled).
        """
        preds_per_voxel = self.train_cfg['preds_per_voxel']
        #  heatmaps, anno_boxes, inds, masks
        dense_map = [self.get_targets_single(i) for i in gt_maps_3d]
        # dense_map = [torch.from_numpy(i).cuda() for i in dense_map]
        # stack
        # Predictions are detached: they only steer the GT-to-slot matching.
        map_cls = [preds_dict[0]['heatmap'].detach().clone()[:,:2] for \
                                          preds_dict in preds_dicts]

        map_reg = [preds_dict[0]['reg'].detach().clone() for \
                                         preds_dict in preds_dicts]
        map_cls = torch.cat(map_cls, 1)
        map_reg = torch.cat(map_reg, 1)
        coors = []
        # Pad each sample's points with its batch index as a new column 0.
        for i, coor in enumerate(dense_map):
            if len(coor) > 0:
                coor_pad = np.pad(coor, ((0, 0), (1, 0)), \
                             mode='constant', constant_values=i)
                coors.append(coor_pad)
        # for k,c in enumerate(coors):
        #     print(k,c.shape)
        if len(coors) ==0:
            coors = [np.zeros((1,9),dtype=np.float32)] # bugfix for None lines
        dense_map = np.concatenate(coors, axis=0)
        dense_map = torch.from_numpy(dense_map).cuda()
        # dense_map = self.dense_pts_filter(dense_map, preds_per_voxel)
        # self.assert_markv(dense_map)
        # Choose the assignment strategy; 'code_weights' defaults truthy (2).
        if self.train_cfg.get('code_weights', 2):
            lines_pts, double_line_voxels = self.assign(map_cls, 
                                map_reg, 
                                dense_map, 
                                preds_per_voxel)
        else:
            lines_pts, double_line_voxels = self.assign_pts(map_cls, 
                                map_reg, 
                                dense_map, 
                                preds_per_voxel)
        # lines_pts1 = self.assign_(map_cls, 
        #                         map_reg, 
        #                         dense_map.clone(), 
        #                         preds_per_voxel)
        # assert lines_pts.shape[0] == lines_pts1.shape[0]
        # self.assert_target(lines_pts, map_reg)
        # self.assert_target(lines_pts1, map_reg)
        # assert (lines_pts - lines_pts1).abs().sum() < 1e-3
        grid_size = torch.tensor(self.grid_size)
        # pc_range = torch.tensor(self.train_cfg['point_cloud_range'])
        # voxel_size = torch.tensor(self.train_cfg['voxel_size'])
        hotmaps, inds, map_regs,line_type_maps = [],[],[],[]
        # feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']
        feature_map_size = (grid_size[:2] / self.train_cfg['out_size_factor']).round().int()
        # Flat one-hot over (batch, H, W, task) marking voxels holding two
        # line points; filled from the flat indices the assigner returned.
        onehot_doublelines = torch.zeros(size=(map_cls.shape[0]* \
                            feature_map_size[1]* \
                            feature_map_size[0]* \
                            len(preds_dicts),),
                            device=map_cls.device, 
                            dtype=map_cls.dtype)
        onehot_doublelines[double_line_voxels] = 1
        onehot_doublelines = onehot_doublelines.view(map_cls.shape[0],
                            feature_map_size[1],
                            feature_map_size[0],
                            len(preds_dicts))
        onehot_doublelines = onehot_doublelines.permute([3,0,1,2])
        onehot_doublelines = [i.unsqueeze(1) for i in onehot_doublelines]
        # Tasks 0 and 2 never get double-line supervision.
        # NOTE(review): hard-coded task indices — confirm the task ordering.
        onehot_doublelines[0][:] = 0
        onehot_doublelines[2][:] = 0
        for k, pred in enumerate(preds_dicts):
            # B, C, H, W = map_cls
            # Task 7 carries 6 extra per-digit channels (decoded below).
            c_num = 2 if k != 7 else 8
            onehot = torch.zeros(size=(map_cls.shape[0],
                                         c_num, 
                                        feature_map_size[1],
                                        feature_map_size[0]),
                                        device=map_cls.device, 
                                        dtype=map_cls.dtype)

            # Keep only the points assigned to this task (column 8 = type).
            mask_ = lines_pts[:, 8] == k
            lines_pt = lines_pts[mask_]
            ind = lines_pt[:, [0, 2, 4, 3, 1]].long() # B C H W
            # kernel = torch.ones(1,2,3,3).cuda()
            # a = F.conv2d(onehot,kernel,None,1,1)
            if k ==7:
                ind[:, 1] = 0
            onehot[ind[:, 0], ind[:, 1], ind[:, 2], ind[:, 3]] = 1
            if k == 7:
                # Peel column 5 apart digit by digit (most significant
                # first) into channels 2..7.
                # NOTE(review): the division has no floor, so `tmp` keeps the
                # lower digits as a fractional part — confirm the intended
                # encoding of column 5.
                for i in range(6):
                    _i = 5-i
                    tmp = lines_pt[:,5] / (10**(_i))
                    lines_pt[:,5] = lines_pt[:,5] % (10**(_i))
                    if len(tmp) > 0:
                        onehot[ind[:, 0], i+2, ind[:, 2], ind[:, 3]] = tmp
            if self.train_cfg.get('lane_type', False) and \
                self.train_cfg.get('lane_type_mask',[False,False,False,False])[k]:
                onehot_line_type = torch.zeros_like(onehot)
                onehot_line_type[ind[:, 0], ind[:, 1], ind[:, 2], ind[:, 3]] = lines_pt[:, 5]
                line_type_maps.append(onehot_line_type)
            else:
                line_type_maps.append([])
            if self.train_cfg.get('code_weights', 2):
                map_reg = lines_pt[:,[6, 7]]
            else:
                map_reg = lines_pt[:,9:]
            hotmaps.append(onehot)
            inds.append(ind)
            map_regs.append(map_reg)
        return hotmaps, inds, map_regs, onehot_doublelines, line_type_maps

    def assert_target(self,ll, map_reg):
        """Debug check: verify that each assigned GT point sits in the
        distance-optimal prediction slot of its voxel.

        For voxels with one point, the chosen slot (pos, column read via
        index 2) must be the nearer of the two predicted regressions; for
        voxels with two points, the chosen pairing must have the smaller
        total distance and the two positions must be complementary.

        Args:
            ll (torch.Tensor): Assigned line points; columns [0, 2, 4, 3, 8]
                are read as (batch, pos, h, w, class) and columns [6, 7] as
                the regression target.
            map_reg (torch.Tensor): Predicted regression map, reshaped per
                cell as (3, 2, 2) = (class, slot, xy).

        Raises:
            AssertionError: If any assignment is not distance-optimal.
        """
        # ll = l2
        cls_target = ll[:, [0, 3, 4, 8]]
        # mask_delete = torch.ones(lines_pts_dense.shape[0], dtype=bool)

        uni, count = torch.unique(cls_target, sorted=False, return_counts=True, dim=0)

        for i in range(uni.shape[0]):
            # All rows sharing this (batch, x, y, class) voxel key.
            index = ((cls_target == uni[i]).sum(-1) == 4).nonzero()
            assert index.shape[0] == count[i],f"{index.shape} == {count[i]}"
            if count[i] == 1:
                # Single point: its slot must be the nearer prediction.
                b, p, h, w, c, = ll[index[0],[0, 2, 4, 3, 8]].long()
                dd12 = ll[index,[6, 7]]
                dd = map_reg[b,:,h,w].reshape(3, 2, 2)[c]
                if p == 0:
                    assert (dd[0] - dd12).norm(p=2).sum() <= (dd[1] - dd12).norm(p=2).sum(), \
                        f"{i}--{b.item()}, {h.item()}, {w.item()}, {c.item()}, {p.item()}, {(dd[0] - dd12).norm(p=2).sum().item()}, {(dd[1] - dd12).norm(p=2).sum().item()}"
                else:
                    assert (dd[0] - dd12).norm(p=2).sum() > (dd[1] - dd12).norm(p=2).sum(), \
                        f"{i}--{b.item()}, {h.item()}, {w.item()}, {c.item()},{p.item()}, {(dd[1] - dd12).norm(p=2).sum().item()}, {(dd[0] - dd12).norm(p=2).sum().item()}"
            else:
                # Two points: positions must be complementary and the chosen
                # (point, slot) pairing must minimize the total distance.
                b, p, h, w, c, = ll[index[0],[0, 2, 4, 3, 8]].long()
                b2, p2, h2, w2, c2, = ll[index[1],[0, 2, 4, 3, 8]].long()

                dd12 = ll[index,[6, 7]].reshape(2,1,2)
                dd = map_reg[b,:,h,w].reshape(3, 2, 2)[c].reshape(1,2,2)
                reg_norm = (dd12 - dd).norm(dim=2, p=2).view(4)
                assert p + p2 == 1
                if p == 0:
                    assert reg_norm[[0, 3]].sum(-1) <= reg_norm[[1, 2]].sum(-1), \
                            f"{i}--{b.item()}, {h.item()}, {w.item()}, {c.item()}, {p.item()}, {reg_norm[[0, 3]].sum(-1)}, {reg_norm[[1, 2]].sum(-1)}"
                else:
                    assert reg_norm[[0, 3]].sum(-1) > reg_norm[[1, 2]].sum(-1),\
                            f"{i}--{b.item()},{h.item()},{w.item()},{c.item()}, {p.item()}, {reg_norm[[0, 3]].sum(-1)}, {reg_norm[[1, 2]].sum(-1)}"
            
    def assign_(self,dt_cls, dt_lines, dense_map, pv):
        """Debug assignment path: drop over-full voxels, then assign slots.

        Filters out GT points whose voxel holds more than ``pv`` points and
        delegates the per-voxel slot assignment to
        ``set_lane_dense_pts_pos``.
        """
        filtered = self.dense_pts_filter(dense_map, pv)
        return self.set_lane_dense_pts_pos(dt_cls, dt_lines, filtered, pv)

    def assign_pts(self, dt_cls, dt_lines, lines_pts_dense, pv):
        """Assign dense GT points to prediction slots when each slot
        regresses a two-point segment (p1, p2).

        Mirrors :meth:`assign` but matches both endpoints of each segment:
        all (point-to-slot, endpoint-order) permutations are enumerated and
        the one with the smallest total L2 cost wins; the decoded endpoints
        are flipped in place when the swapped ordering wins. Column 2 (pos)
        of ``lines_pts_dense`` is overwritten with the chosen slot
        (-1 = unassigned, later filtered out).

        Args:
            dt_cls (torch.Tensor): Detached class heatmap (B, C, H, W).
            dt_lines (torch.Tensor): Detached segment regression map
                (B, C, H, W) holding p1x p1y p2x p2y per slot.
            lines_pts_dense (torch.Tensor): Dense GT points with columns
                [batch, line_id, pos, x, y, z, dx, dy, type]; mutated in
                place.
            pv (int): Prediction slots per voxel (expected 2).

        Returns:
            tuple:
                - torch.Tensor: Assigned points with their decoded endpoint
                  offsets appended as 4 extra columns.
                - torch.Tensor: Flat indices of double-point voxels.
        """
        # lines_pts_dense:  batch_size lines_id pose x y z dx dy type
        # dt_lines: p1x p1y p2x p2y
        device = lines_pts_dense.device
        # Decode the packed per-point endpoints from the dx/dy columns.
        # NOTE(review): the second unpack swaps (y, x) relative to the first
        # — confirm this matches decode_one_voxels' output ordering.
        d0x, d0y = self.decode_one_voxels(lines_pts_dense[..., 6])
        d1y, d1x = self.decode_one_voxels(lines_pts_dense[..., 7])

        dxy = torch.stack([d0x, d0y, d1x, d1y], -1).reshape(-1, 2, 2)
        # new_lines_pts_dense = lines_pts_dense.clone()
        # Mark every point unassigned until a slot is chosen below.
        lines_pts_dense[:,2] = -1
        # PR = dt_lines.clone()
        dt_cls = dt_cls.permute([0, 2, 3, 1]).contiguous()
        dt_lines = dt_lines.permute([0, 2, 3, 1]).contiguous()
        dt_lines = torch.clamp(dt_lines,min=0, max=1)
        cls_target = lines_pts_dense[:, [0, 3, 4, 8]] # batch_size x y type
        cls_target = cls_target.long()

        N, H, W, C = dt_cls.shape
        cls_num = int(C / pv)
        dt_cls = dt_cls.view(-1 , pv)
        dt_lines = dt_lines.view(-1, pv, 2, 2)
        reg_num = 1

        # Flat voxel id: (batch, y, x, class) collapsed into one index.
        idx = (cls_target[:, 0] * W * H * cls_num + \
              cls_target[:, 2] * W * cls_num + \
              cls_target[:, 1] * cls_num + \
              cls_target[:, 3]).long()

        rank = torch.arange(idx.shape[0],
                            device=cls_target.device,
                            dtype=torch.long)

        # indices: position of each row after sorting by voxel id
        indices = torch.argsort(idx)
        cls_target = cls_target[indices]
        idx, rank = idx[indices], rank[indices]
        lines_pts_dense = lines_pts_dense[indices]
        dxy = dxy[indices]
        """
        过滤单点和双点
        """
        # (Split voxels into single-point vs. double-point groups by
        # sentinel-padding the sorted ids and diffing neighbours.)
        idx = torch.cat([idx,
                        torch.Tensor([-1]).cuda().long()],-1)

        idx_tmp = torch.cat([torch.Tensor([-1]).cuda().long(),
                        idx,], -1)
        # one-hot positions of duplicates within the sorted order
        idx_mask = (idx_tmp[:-1] - idx_tmp[1:]) != 0 # first point of each voxel
        idx_mask[-1] = False
        frist_p = idx_mask.nonzero() 

        mask_1 = idx[frist_p] != idx[frist_p + 1]
        mask_2 = idx[frist_p] == idx[frist_p + 1]
        idx_1 = frist_p[mask_1]
        idx_2 = frist_p[mask_2], (frist_p + 1)[mask_2]

        double2 = idx[idx_2[0]]
        assert idx[idx_1].shape[0] == torch.unique(idx[idx_1]).shape[0]
        assert (idx[idx_2[0]] - idx[idx_2[1]]).abs().sum() == 0
        assert (idx[torch.cat(idx_2)].shape[0]) == \
            (2 * torch.unique(idx[torch.cat(idx_2)]).shape[0])
        assert idx_2[0].shape[0] * 2 + idx_1.shape[0]<=idx.shape[0] - 1
        """
        双点assign
        """
        # flat voxel index of each double-point voxel
        voxels_id = idx[idx_2[0]]
        lines_id = idx_2

        # assert 
        assert (idx[lines_id[0]] - idx[lines_id[1]]).abs().sum() == 0
        assert (cls_target[lines_id[0],:] - \
                     cls_target[lines_id[1],:]).abs().sum() == 0

        gt_reg_0 = dxy[lines_id[0]]
        gt_reg_1 = dxy[lines_id[1]]
        gt_reg = torch.stack((gt_reg_0, gt_reg_1), 1) # n x pv x 2 x 2(x, y)
        
        if len(voxels_id) > 0:
            assert voxels_id.max() < dt_lines.shape[0]
        pred_reg = dt_lines[voxels_id] # n x pv x 2 x 2

        # Broadcast to score every (gt point, slot, endpoint) combination.
        gt_reg = gt_reg.reshape(-1, 2, 1, 2, 1, 2)
        pred_reg = pred_reg.reshape(-1, 1, 2, 1, 2, 2)
        # assign
        reg_norm = (gt_reg - pred_reg).norm(dim=-1, p=2)
        
        # reg_norm = (gt_reg - pred_reg).norm(dim=-1, p=2).view(-1, 4, 4)
        # s11, s12 = [0, 1], [2, 3]
        # s21, s22 = [0, 3], [1, 2]

        # Partial costs: S<pairing><gt index><endpoint order>; the final
        # candidates in M1/M2 combine them into the 8 full permutations.
        S111 = reg_norm[:, 0, 0, 0, 0] + reg_norm[:, 0, 0, 1, 1]
        S112 = reg_norm[:, 0, 0, 0, 1] + reg_norm[:, 0, 0, 1, 0]
        S121 = reg_norm[:, 1, 1, 0, 0] + reg_norm[:, 1, 1, 1, 1]
        S122 = reg_norm[:, 1, 1, 0, 1] + reg_norm[:, 1, 1, 1, 0]

        S211 = reg_norm[:, 0, 1, 0, 0] + reg_norm[:, 0, 1, 1, 1]
        S212 = reg_norm[:, 0, 1, 0, 1] + reg_norm[:, 0, 1, 1, 0]
        S221 = reg_norm[:, 1, 0, 0, 0] + reg_norm[:, 1, 0, 1, 1]
        S222 = reg_norm[:, 1, 0, 0, 1] + reg_norm[:, 1, 0, 1, 0]

        M1 = [S111 + S121, S111 + S122, S112 + S121, S112 + S122]
        M2 = [S211 + S221, S211 + S222, S212 + S221, S212 + S222]

        M1.extend(M2)
        tmp_reg_norm = torch.stack(M1, 1)
        # Rows: [slot of gt0, slot of gt1, flip gt0?, -, flip gt1?, -]
        # for each of the 8 permutations scored above.
        tmp_assin = torch.tensor(
            [[0, 1, 0, 1, 0, 1], 
            [0, 1, 0, 1, 1, 0], 
            [0, 1, 1, 0, 0, 1], 
            [0, 1, 1, 0, 1, 0], 
            [1, 0, 0, 1, 0, 1],
            [1, 0, 0, 1, 1, 0], 
            [1, 0, 1, 0, 0, 1], 
            [1, 0, 1, 0, 1, 0],],
            dtype=torch.float32).cuda()

        idx_ = tmp_reg_norm.argmin(-1)

        lines_pts_dense[lines_id[0], 2] = tmp_assin[idx_][:, 0]
        lines_pts_dense[lines_id[1], 2] = tmp_assin[idx_][:, 1]

        # Flip endpoint order where the swapped ordering won.
        mask = tmp_assin[idx_][:, 2] == 1
        dxy[lines_id[0][mask]] = torch.flip(dxy[lines_id[0][mask]],dims=[-2])
        
        mask = tmp_assin[idx_][:, 4] == 1
        dxy[lines_id[1][mask]] = torch.flip(dxy[lines_id[1][mask]],dims=[-2])
        # remove replicated lines
        # todo check
        """
        单点assign
        """

        # flat voxel index of each single-point voxel
        voxels_id = idx[idx_1]
        lines_id = idx_1

        assert voxels_id.max() < dt_lines.shape[0]

        # gt_reg = lines_pts_dense[lines_id, 6:8]                        
        gt_reg = dxy[lines_id]
        pred_reg = dt_lines[voxels_id] # n x pv x 2

        gt_reg =     gt_reg.reshape(-1, 1, 2, 1, 2)
        pred_reg = pred_reg.reshape(-1, 2, 1, 2, 2)
        # assign
        reg_norm = (gt_reg - pred_reg).norm(dim=-1, p=2)
        # 4 candidates: (slot 0/1) x (endpoint order kept/flipped).
        S111 = reg_norm[:, 0, 0, 0] + reg_norm[:, 0, 1, 1]
        S112 = reg_norm[:, 0, 0, 1] + reg_norm[:, 0, 1, 0]
        S121 = reg_norm[:, 1, 0, 0] + reg_norm[:, 1, 1, 1]
        S122 = reg_norm[:, 1, 0, 1] + reg_norm[:, 1, 1, 0]

        M1 = [S111, S112, S121, S122]

        tmp_reg_norm = torch.stack(M1, 1)

        # Rows: [slot, -, flip?, -] for each of the 4 candidates above.
        tmp_assin = torch.tensor(
            [[0, 1, 0, 1], 
            [0, 1, 1, 0], 
            [1, 0, 0, 1],
            [1, 0, 1, 0],],
            dtype=torch.float32).cuda()
        idx_ = tmp_reg_norm.argmin(-1)



    # lines_pts_dense:  batch_size lines_id pose x y z dx dy type
    # set pos
        lines_pts_dense[lines_id, 2] = tmp_assin[idx_][:, 0]
    # swap points 
        mask = tmp_assin[idx_][:, 2] == 1
        dxy[lines_id[mask]] = torch.flip(dxy[lines_id[mask]],dims=[-2])

        # restore the original row order of lines_pts_dense
        idx = rank.argsort()
        lines_pts_dense = lines_pts_dense[idx]
        dxy = dxy[idx]
        # drop GT points in voxels holding more than 2 points
        mask = lines_pts_dense[:,2]>=0
        lines_pts_dense = lines_pts_dense[mask, :]
        dxy = dxy[mask, :]
        lines_pts_dense = torch.cat([lines_pts_dense, dxy.view(dxy.shape[0],-1)], -1)
        return lines_pts_dense, double2


    def assert_markv(self, lines_pts_dense):
        """Sanity-check dense line points per batch and per line.

        For every (batch, line) group this asserts that all points share a
        single type label (last column) and that consecutive points move at
        most one grid cell along at least one of the first two coordinate
        columns.

        Args:
            lines_pts_dense (torch.Tensor): Dense points; column 0 is the
                batch index, column 1 the line id, and within a line the
                columns read as [x, y, ..., type].

        Raises:
            AssertionError: If a line mixes types or jumps cells.
        """
        for batch_id in torch.unique(lines_pts_dense[:, 0]):
            batch_rows = lines_pts_dense[lines_pts_dense[:, 0] == batch_id, 1:]
            if len(batch_rows) == 0:
                continue

            for line_id in torch.unique(batch_rows[:, 0]):
                line = batch_rows[batch_rows[:, 0] == line_id, 1:]
                if len(line) == 0:
                    continue
                xytype = line[:, [1, 2, -1]]
                # One polyline carries exactly one type label.
                assert len(torch.unique(xytype[:, -1])) == 1
                step = (xytype[:-1] - xytype[1:]).abs()
                # Consecutive points stay within one cell on x or y.
                assert torch.all(step[:, :2].min(-1)[0] <= 1)
       

    def assign(self, dt_cls, dt_lines, lines_pts_dense, pv):
        """Assign each dense GT line point to one of the two prediction
        slots of its voxel.

        Points are grouped by a flat (batch, y, x, class) voxel id; voxels
        with two points get the L1-optimal point-to-slot pairing (numeric
        ties are treated as duplicates and collapsed to one point), while
        voxels with one point pick the slot by a weighted mix of regression
        distance and (sigmoid) classification confidence. Column 2 (pos) of
        ``lines_pts_dense`` is overwritten in place with the chosen slot
        (-1 = unassigned, filtered out before returning).

        Args:
            dt_cls (torch.Tensor): Detached class heatmap (B, C, H, W),
                two slots per class.
            dt_lines (torch.Tensor): Detached regression map (B, C, H, W).
            lines_pts_dense (torch.Tensor): Dense GT points with columns
                [batch, line_id, pos, x, y, z, dx, dy, type]; mutated in
                place.
            pv (int): Prediction slots per voxel; must be 2.

        Returns:
            tuple:
                - torch.Tensor: Points that received a valid slot.
                - torch.Tensor: Flat indices of double-point voxels.
        """
        # dt_cls:  batch_size lines_id pose x y z dx dy type
        assert pv == 2
        device = lines_pts_dense.device
        # new_lines_pts_dense = lines_pts_dense.clone()
        # Mark every point unassigned until a slot is chosen below.
        lines_pts_dense[:,2] = -1
        # PR = dt_lines.clone()
        dt_cls = dt_cls.permute([0, 2, 3, 1]).contiguous()
        dt_lines = dt_lines.permute([0, 2, 3, 1]).contiguous()
        cls_target = lines_pts_dense[:, [0, 3, 4, 8]] # batch_size x y type
        cls_target = cls_target.long()

        N, H, W, C = dt_cls.shape
        cls_num = int(C / pv)
        dt_cls = dt_cls.view(-1 , pv)
        dt_lines = dt_lines.view(-1, pv, 2)
        reg_num = 1

        # Flat voxel id: (batch, y, x, class) collapsed into one index.
        idx = (cls_target[:, 0] * W * H * cls_num + \
              cls_target[:, 2] * W * cls_num + \
              cls_target[:, 1] * cls_num + \
              cls_target[:, 3]).long()

        rank = torch.arange(idx.shape[0],
                            device=cls_target.device,
                            dtype=torch.long)

        # indices: position of each row after sorting by voxel id
        indices = torch.argsort(idx)
        cls_target = cls_target[indices]
        idx, rank = idx[indices], rank[indices]
        lines_pts_dense = lines_pts_dense[indices]

        """
        过滤单点和双点
        """
        # (Split voxels into single-point vs. double-point groups by
        # sentinel-padding the sorted ids and diffing neighbours.)
        idx = torch.cat([idx,
                        torch.Tensor([-1]).cuda().long()],-1)

        idx_tmp = torch.cat([torch.Tensor([-1]).cuda().long(),
                        idx,], -1)
        # one-hot positions of duplicates within the sorted order
        idx_mask = (idx_tmp[:-1] - idx_tmp[1:]) != 0 # first point of each voxel
        idx_mask[-1] = False
        frist_p = idx_mask.nonzero() 

        mask_1 = idx[frist_p] != idx[frist_p + 1]
        mask_2 = idx[frist_p] == idx[frist_p + 1]
        idx_1 = frist_p[mask_1]
        idx_2 = frist_p[mask_2], (frist_p + 1)[mask_2]
        double_line_voxels = idx[idx_2[0]]
        # assert idx[idx_1].shape[0] == torch.unique(idx[idx_1]).shape[0]
        # assert (idx[idx_2[0]] - idx[idx_2[1]]).abs().sum() == 0
        # assert (idx[torch.cat(idx_2)].shape[0]) == \
        #     (2 * torch.unique(idx[torch.cat(idx_2)]).shape[0])
        # assert idx_2[0].shape[0] * 2 + idx_1.shape[0]<=idx.shape[0] - 1
        """
        双点assign
        """
        # flat voxel index of each double-point voxel
        voxels_id = idx[idx_2[0]] 
        lines_id = idx_2

        # assert 
        # assert (idx[lines_id[0]] - idx[lines_id[1]]).abs().sum() == 0
        # assert (cls_target[lines_id[0],:] - \
        #              cls_target[lines_id[1],:]).abs().sum() == 0

        gt_reg_0 = lines_pts_dense[lines_id[0], 6:8]
        gt_reg_1 = lines_pts_dense[lines_id[1], 6:8]
        gt_reg = torch.stack((gt_reg_0, gt_reg_1),1)
        
        # if len(voxels_id) > 0:
        #     assert voxels_id.max() < dt_lines.shape[0]
        pred_reg = dt_lines[voxels_id] # n x pv x 2

        gt_reg = gt_reg.reshape(-1, 2, 1, 2)
        pred_reg = pred_reg.reshape(-1, 1, 2, 2)
        # assign
        # reg_norm = (gt_reg - pred_reg).norm(dim=3, p=2).view(-1, 4)
        reg_norm = (gt_reg - pred_reg).abs().sum(3).view(-1, 4)
        # Diagonal pairing (gt0->slot0, gt1->slot1) vs. crossed pairing.
        mask = reg_norm[:,[0, 3]].sum(-1) > reg_norm[:,[1, 2]].sum(-1)

        lines_pts_dense[lines_id[0][mask], 2] = 1
        lines_pts_dense[lines_id[1][mask], 2] = 0
        lines_pts_dense[lines_id[0][~mask], 2] = 0
        lines_pts_dense[lines_id[1][~mask], 2] = 1

        # remove replicated lines
        # Numerically identical costs mean the two points are duplicates:
        # keep one (in its cheaper slot) and mark the other unassigned.
        mask = (reg_norm[:,[0, 1]] - reg_norm[:,[2, 3]]).abs().sum(-1) < 1e-5
        mask_ =  (reg_norm[:,0] <= reg_norm[:,1])
        mask1 = mask & mask_
        mask2 = mask & (~mask_)
        # if mask.sum() > 0:
        #     print(1)
        lines_pts_dense[lines_id[0][mask1], 2] = 0     # cost0 <= cost1: slot 0
        lines_pts_dense[lines_id[0][mask2], 2] = 1     # cost0 > cost1: slot 1
        lines_pts_dense[lines_id[1][mask], 2] = -1    # drop the duplicate
        """
        单点assign
        """

        # flat voxel index of each single-point voxel
        voxels_id = idx[idx_1]
        lines_id = idx_1

        # if len(voxels_id) > 0:
        #     assert voxels_id.max() < dt_lines.shape[0]

        gt_reg = lines_pts_dense[lines_id, 6:8]        

        pred_reg = dt_lines[voxels_id] # n x pv x 2 bug todo fix
        dt_cls_ = dt_cls[voxels_id]
        gt_reg = gt_reg.reshape(-1, 1, 2)
        pred_reg = pred_reg.reshape(-1, 2, 2)
        # assign
        # reg_norm = (gt_reg - pred_reg).norm(idim=2, p=2).view(-1, 2)
        reg_norm = (gt_reg - pred_reg).abs().sum(2).view(-1, 2)
        
        # NOTE(review): `mask` and `mask1` below are computed but immediately
        # superseded by the weighted `reg_norm_all` decision — looks like
        # leftover experimentation.
        mask = reg_norm[:, 0] <= reg_norm[:, 1]
        mask1 = dt_cls_[:,0] >= dt_cls_[:,1]
        match_weights = self.train_cfg['matching_weights']
        # Slot choice mixes regression distance and (1 - sigmoid confidence).
        reg_norm_all = reg_norm / 4 * match_weights[0] + (1 - dt_cls_.sigmoid()) * match_weights[1]
        mask = reg_norm_all[:,0] <= reg_norm_all[:,1]
        lines_pts_dense[lines_id[mask], 2] = 0
        lines_pts_dense[lines_id[~mask], 2] = 1
        if self.map_layer:
            lines_pts_dense[lines_id, 2] = 0
        # restore the original row order of lines_pts_dense
        idx = rank.argsort()
        lines_pts_dense = lines_pts_dense[idx]
        # drop points left unassigned (pos < 0)
        lines_pts_dense = lines_pts_dense[lines_pts_dense[:,2]>=0, :]
        # self.assert_markv(lines_pts_dense)
        return lines_pts_dense, double_line_voxels

    def set_lane_dense_pts_pos(self, dt_cls, dt_lines, lines_pts_dense, pv):
        """Legacy/debug assigner: set each dense GT point's slot (column 2)
        by nearest predicted regression only.

        Unlike :meth:`assign`, this performs no duplicate removal and does
        not use classification confidence; it also detects double-point
        voxels only through adjacent duplicates in the sorted id sequence.

        Args:
            dt_cls (torch.Tensor): Class heatmap (B, C, H, W).
            dt_lines (torch.Tensor): Regression map (B, C, H, W).
            lines_pts_dense (torch.Tensor): Dense GT points with columns
                [batch, line_id, pos, x, y, z, dx, dy, type]; mutated in
                place.
            pv (int): Prediction slots per voxel (expected 2).

        Returns:
            torch.Tensor: The points with column 2 set to the chosen slot.
        """
        # dt_cls:  batch_size lines_id pose x y z dx dy type
        device = lines_pts_dense.device
        new_lines_pts_dense = lines_pts_dense.clone()

        dt_cls = dt_cls.permute([0, 2, 3, 1]).contiguous()
        dt_lines = dt_lines.permute([0, 2, 3, 1]).contiguous()
        cls_target = lines_pts_dense[:, [0, 3, 4, 8]] # batch_size x y type
        cls_target = cls_target.long()

        N, H, W, C = dt_cls.shape
        cls_num = int(C / pv)
        dt_cls = dt_cls.view(-1 , pv)
        dt_lines = dt_lines.view(-1, pv, 2)
    
        reg_num = 1

        # Flat voxel id: (batch, y, x, class) collapsed into one index.
        idx = (cls_target[:, 0] * W * H * cls_num + \
              cls_target[:, 2] * W * cls_num + \
              cls_target[:, 1] * cls_num + \
              cls_target[:, 3]).long()

        rank = torch.arange(idx.shape[0],
                            device=cls_target.device,
                            dtype=torch.long)

        # indices: position of each row after sorting by voxel id
        indices = torch.argsort(idx)
        cls_target = cls_target[indices]
        idx, rank = idx[indices], rank[indices]
        lines_pts_dense = lines_pts_dense[indices]
        """
        双点assign
        """
        # one-hot positions of duplicated voxel ids in the sorted order
        idx_mask = (idx[:-1] - idx[1:]) == 0
        dupli = idx_mask.nonzero() 

        # flat voxel index of each double-point voxel
        voxels_id = idx[dupli]
        lines_id = [dupli, dupli + 1]

        # assert 
        assert (idx[lines_id[0]] - idx[lines_id[1]]).abs().sum() == 0
        assert (cls_target[lines_id[0],:] - \
                     cls_target[lines_id[1],:]).abs().sum() == 0

        gt_reg_0 = lines_pts_dense[lines_id[0], 6:8]
        gt_reg_1 = lines_pts_dense[lines_id[1], 6:8]
        gt_reg = torch.stack((gt_reg_0, gt_reg_1),1)
        
        assert voxels_id.max() < dt_lines.shape[0]
        pred_reg = dt_lines[voxels_id] # n x pv x 2

        gt_reg = gt_reg.reshape(-1, 2, 1, 2)
        pred_reg = pred_reg.reshape(-1, 1, 2, 2)
        # assign
        reg_norm = (gt_reg - pred_reg).norm(dim=3, p=2).view(-1, 4)
        # Diagonal pairing (gt0->slot0, gt1->slot1) vs. crossed pairing.
        mask = reg_norm[:,[0, 3]].sum(-1) > reg_norm[:,[1, 2]].sum(-1)

        lines_pts_dense[lines_id[0][mask], 2] = 1
        lines_pts_dense[lines_id[1][mask], 2] = 0
        lines_pts_dense[lines_id[0][~mask], 2] = 0
        lines_pts_dense[lines_id[1][~mask], 2] = 1

        """
        单点assign
        """
        # select the single-point rows (everything not in a duplicate pair)
        mask_uni = torch.ones(new_lines_pts_dense.shape[0],device=device, dtype=bool)
        mask_uni[lines_id[0]] = False
        mask_uni[lines_id[1]] = False

        # flat voxel index of each single-point voxel
        voxels_id = idx[mask_uni]
        lines_id = mask_uni.nonzero()    # mask

        gt_reg = lines_pts_dense[lines_id, 6:8]        

        pred_reg = dt_lines[voxels_id] # n x pv x 2

        gt_reg = gt_reg.reshape(-1, 1, 2)
        pred_reg = pred_reg.reshape(-1, 2, 2)
        # assign
        reg_norm = (gt_reg - pred_reg).norm(dim=2, p=2).view(-1, 2)
        # Slot 1 wins when it is strictly nearer than slot 0.
        mask = reg_norm[:,[0]].sum(-1) > reg_norm[:,[1]].sum(-1)

        lines_pts_dense[lines_id[mask], 2] = 1
        lines_pts_dense[lines_id[~mask], 2] = 0

        # restore the original row order of lines_pts_dense
        idx = rank.argsort()
        lines_pts_dense = lines_pts_dense[idx]
        return lines_pts_dense

    def dense_pts_filter(self, lines_pts_dense, preds_per_voxel):
        """Remove every point whose voxel key occurs more than
        ``preds_per_voxel`` times.

        The key is columns ``[0, 3, 4, 8]`` of ``lines_pts_dense`` —
        presumably (line id, voxel x, voxel y, class); confirm against the
        dense-point layout produced by ``get_targets_single``.

        Args:
            lines_pts_dense (torch.Tensor): (P, >=9) dense line points.
            preds_per_voxel (int): Maximum allowed points per voxel key.

        Returns:
            torch.Tensor: ``lines_pts_dense`` with all rows of
            over-populated voxels dropped (all of them, not just the excess).
        """
        cls_target = lines_pts_dense[:, [0, 3, 4, 8]]
        # BUGFIX: the keep-mask must live on the same device as the data;
        # a CPU bool mask cannot index a CUDA tensor.
        keep = torch.ones(lines_pts_dense.shape[0], dtype=torch.bool,
                          device=lines_pts_dense.device)

        uni, count = torch.unique(cls_target, sorted=False, return_counts=True, dim=0)
        over = count > preds_per_voxel

        if over.sum() == 0:
            return lines_pts_dense

        for key in uni[over]:
            # Rows matching the over-populated key on all 4 columns.
            hit = (cls_target == key).sum(-1) == 4
            keep[hit] = False

        return lines_pts_dense[keep, :]

    def in_range(self, gt_dense_pts, pts_range):
        """Boolean mask of points whose x/y coordinates fall inside
        ``pts_range`` (``[x0, y0, z0, x1, y1, z1]``, bounds inclusive)."""
        x = gt_dense_pts[:, 1]
        y = gt_dense_pts[:, 2]
        inside_x = np.logical_and(x >= pts_range[0], x <= pts_range[3])
        inside_y = np.logical_and(y >= pts_range[1], y <= pts_range[4])
        return np.logical_and(inside_x, inside_y)

    def outlier_gt_filter(self, gt_dense_pts, pts_range):
        """Drop GT lines lying entirely outside ``pts_range`` and renumber
        the survivors with contiguous ids.

        Ring-shaped lines that are only partially inside the range are
        "rolled" so the polyline starts at its first out-of-range point,
        keeping the in-range portion contiguous.

        Args:
            gt_dense_pts (np.ndarray): Points, one row per point; column 0 is
                the line id, columns 1/2 are x/y.
            pts_range: Point-cloud range ``[x0, y0, z0, x1, y1, z1]``.

        Returns:
            np.ndarray: Filtered points (float32). Note: out-of-range points
            of a *kept* line are NOT removed here, only whole lines with no
            in-range point.
        """
        if len(gt_dense_pts) == 0:
            return gt_dense_pts
        lines_num = int(np.max(gt_dense_pts[:, 0]) + 1)

        pts_mask = self.in_range(gt_dense_pts, pts_range)
        new_gt_dense_pts = np.zeros(gt_dense_pts.shape, dtype=np.float32)

        id_count = 0
        filled_size = 0
        for i in range(lines_num):
            pos_mask = (gt_dense_pts[:, 0] == i)
            # Skip lines with no point inside the range.
            if (pts_mask[pos_mask].sum() == 0):
                continue

            # Boolean indexing copies, so renumbering leaves the input intact.
            lines_gt = gt_dense_pts[pos_mask, :]
            lines_gt[:, 0] = id_count
            id_count += 1

            # Treat the line as a closed ring when its endpoints' squared
            # distance is below 1 (units follow the point coordinates).
            circle = (np.abs(lines_gt[0, 1] - lines_gt[-1, 1])**2 + np.abs(lines_gt[0, 2] - lines_gt[-1, 2])**2) < 1
            if(circle and
              (pts_mask[pos_mask].sum() != pos_mask.sum()) and
              self.in_range(lines_gt[0, :].reshape(1, -1), pts_range)[0]):     # ring, partially out of range, and its start point inside the range
                # Roll the ring to start at the first out-of-range point;
                # the last slice re-appends that point to close the ring.
                first_outside = np.where(~pts_mask[pos_mask])[0][0]
                lines_gt = np.concatenate([lines_gt[first_outside: -1, :],
                                           lines_gt[:first_outside, :],
                                           lines_gt[first_outside, :].reshape(1, -1)])

            new_gt_dense_pts[filled_size: filled_size + len(lines_gt), :] = lines_gt
            filled_size += len(lines_gt)

        new_gt_dense_pts = new_gt_dense_pts[:filled_size]
        return new_gt_dense_pts

    def get_targets_single(self, gt_maps_3d, k_max=1e5):
        """Generate dense line-point targets for a single sample.

        Filters the GT map points to the (possibly augmented) BEV range
        ``self.pcr``, then rasterizes them into per-voxel dense points via
        the C extension ``VI.gt_lines_voxel_interpolation``.

        Args:
            gt_maps_3d (torch.Tensor): GT map points, one row per point:
                ``[line_id, x, y, z, line_type(, type)]``; when more than 5
                columns are present, columns ``[0, 1, 2, 4, 5]`` are kept.
            k_max (float): Fallback cap on the number of dense points when
                ``train_cfg['max_objs']`` is unset. Default: 1e5.

        Returns:
            np.ndarray: (size, 8) dense points
            ``[id, position, x, y, z, d1, d2, cls]``.
        """
        # lines_pts_dense:  batch_size lines_id pose x y z dx dy type
        # gt_maps_3d: lines_id x y z lines_type type
        device = gt_maps_3d.device  # NOTE(review): unused in this method
        
        max_objs = int(self.train_cfg.get('max_objs', k_max))
        # Output-resolution voxel size (input voxel size * stride).
        vs = np.array(self.train_cfg['voxel_size']) * self.train_cfg['out_size_factor']
        pcr = np.array(self.pcr).astype(np.float32)
        gt_pts = gt_maps_3d.cpu().numpy()
        if gt_pts.shape[1] > 5:
            gt_pts = gt_pts[:,[0,1,2,4,5]]
        gt_pts = gt_pts.astype(np.float32)
        # Drop fully out-of-range lines; re-roll rings cut by the border.
        gt_pts = self.outlier_gt_filter(gt_pts, pcr)
        dense_pts = np.zeros((max_objs, 8), dtype=np.float32)


        # Fills dense_pts in place; returns the number of valid rows.
        size = VI.gt_lines_voxel_interpolation(gt_pts, vs, pcr, 
                                            max_objs, dense_pts)
        dense_pts = dense_pts[:size, :]
        return dense_pts
        # [id, position, x, y, z, d1, d2, cls]

    def get_markv_target(self, target_heatmap, ind):
        """Build a Markov-chain-style smoothed heatmap from the predictions.

        Per line, probabilities are accumulated with a forward and a backward
        cumulative product along the line's points, and the two directions are
        averaged.

        Args:
            target_heatmap (torch.Tensor): (N, C, H, W) predicted heatmap
                (already sigmoid-clipped by the caller).
            ind (torch.Tensor): (P, 5) indices ``[batch, c, h, w, line_id]``.

        Returns:
            torch.Tensor: (N, C, H, W) smoothed map,
            ``(forward + backward) / 2``.
        """
        # Crop the precomputed per-pixel affine coefficients to the active
        # BEV window chosen by aug_bev_range.
        k_markv = self.k_markv[:,self.crop[2]:self.crop[3],self.crop[0]:self.crop[1]]
        b_markv = self.b_markv[:,self.crop[2]:self.crop[3],self.crop[0]:self.crop[1]]
        N = target_heatmap.shape[0]
        pred1 = target_heatmap.clone()
        pred2 = target_heatmap.clone()

        # Affine-map the scores, then clamp into (0, 1) so cumprod is stable.
        pred1 = k_markv * pred1 + b_markv
        pred2 = k_markv * pred2 + b_markv
        pred1 = pred1.clamp(min=1e-5, max=1-1e-6)
        pred2 = pred2.clamp(min=1e-5, max=1-1e-6)

        for b in range(N):

            b_lines = ind[ind[:, 0] == b, 1:]
            if len(b_lines) == 0:
                continue

            l_max =b_lines[:, -1].max() + 1
            for l in range(l_max):
                line = b_lines[b_lines[:,-1]==l, :] # c h w,l
                if len(line) == 0:
                    continue
                cls1 = pred1[b][line[:,0],line[:,1],line[:,2]]
                cls2 = pred2[b][line[:,0],line[:,1],line[:,2]]
                # t = target[b][line[:,0],line[:,1],line[:,2]]

                # Forward / backward cumulative products along the line.
                pred1[b][line[:,0],line[:,1],line[:,2]] = cls1.cumprod(0)
                pred2[b][line[:,0],line[:,1],line[:,2]] = cls2.flip(0).cumprod(0).flip(0) 

        return (pred1 + pred2) / 2

    @staticmethod
    def get_cross(target, gt_s1, gt_s2, id1, id2, eps=1e-8):

        tmp1 = gt_s1[:, id1] - gt_s2[:, id2]
        mask1 = (tmp1.abs() - 1).abs() < eps

        tmp11 = (tmp1 < 0) & mask1
        tmp12 = (tmp1 > 0) & mask1

        target[tmp11, id1] = -1
        target[tmp12, id1] =  1

        target[tmp12, id2 + 2] = -1
        target[tmp11, id2 + 2] =  1

        target[mask1, 4] = 1
        return target

    @staticmethod
    def get_add(target, gt_s1, gt_s2, id1, id2, eps=1e-8):

        tmp1 = gt_s1[:, id1] + gt_s2[:, id2]
        mask1 = (tmp1.abs() - 3).abs() < eps

        target[mask1, id1] = 1
        target[mask1, id2 + 2] = 1

        target[mask1, 4] = 3
        return target

    @staticmethod
    def get_norm2(target, gt_s1: torch.Tensor, gt_s2:torch.Tensor, id1, id2, eps=1e-8):

        tmp1 = gt_s1.view(-1,2,2)[:, id1] - gt_s2.view(-1,2,2)[:, id2]
        tmp1 = tmp1.norm(dim=-1,p=2)
        mask0 = tmp1 < eps
        mask1 = (tmp1 - 1).abs() <eps
        mask = (mask0 | mask1)
        target[mask, 0] = id1
        target[mask, 1] = id2
        target[mask, 3:4] = 1
        target[mask0, 2] = 0
        target[mask1, 2] = 1

        return target

    def get_distance_target_4p(self,
                            target_map: torch.Tensor, 
                            ind: torch.Tensor,
                            MaxLinesK: int =1000):
        """Build distance-consistency targets for 2-points-per-voxel (4-value)
        regression.

        Args:
            target_map (torch.Tensor): (P, 4) GT point pairs per voxel.
            ind (torch.Tensor): (P, 5) indices ``[batch, c, h, w, line_id]``.
            MaxLinesK (int): Stride used to build a unique (batch, line) key;
                must exceed the per-sample line count.

        Returns:
            torch.Tensor: (P-1, 4) targets ``[id1, id2, distance, valid]``
            for consecutive points of the same line.
        """
        # Unique key per (batch, line) so matches never span lines/batches.
        line_key = ind[:, 0] * MaxLinesK + ind[:, -1]

        seg_a, seg_b = target_map[:-1], target_map[1:]
        key_a, key_b = line_key[:-1], line_key[1:]
        target = seg_a.new_zeros(size=(seg_a.shape[0], 4))

        # Test all four point pairings between consecutive voxels
        # (cross pairings first, then matched pairings).
        for ia, ib in ((1, 0), (0, 1), (0, 0), (1, 1)):
            target = self.get_norm2(target, seg_a, seg_b, ia, ib)

        # Zero-out pairs that belong to different lines or batches.
        target[key_a != key_b, :] = 0
        return target
    def get_distance_aux_loss_4p(self,
                            pred_line: torch.Tensor, 
                            target: torch.Tensor,
                            w = 1,
                            eps=0.1):
        """Auxiliary distance-consistency loss for 2-points-per-voxel
        regression.

        Args:
            pred_line (torch.Tensor): (P, 4) predicted point pairs.
            target (torch.Tensor): (P-1, 4) from ``get_distance_target_4p``:
                ``[id1, id2, expected distance, valid flag]``.
            w (float): Loss weight. Default: 1.
            eps (float): Residual slack for the Markov accumulation.

        Returns:
            tuple[torch.Tensor, torch.Tensor]: ``(loss, loss_dis_markv)``;
            the second term is zero when ``self.dis_markv`` is disabled.
        """
        pred_s1, pred_s2 = pred_line[:-1], pred_line[1:]
        # pred_s1/pred_s2 flatten from (P-1, 2, 2) to (2*(P-1), 2): row i of
        # ``target`` owns flat rows 2*i and 2*i + 1.
        # BUGFIX: the row offset 2*i must be added to the point-index columns
        # (0 and 1), not to the expected-distance column (2) as before —
        # otherwise only rows 0/1 of the predictions were ever gathered and
        # the distance target grew unboundedly with i.
        offset = torch.arange(len(target), device=target.device) * 2
        idx = target[:, :2].long() + offset.unsqueeze(-1)
        p1 = pred_s1.reshape(-1, 2)[idx[:, 0]]
        p2 = pred_s2.reshape(-1, 2)[idx[:, 1]]
        dist = (p1 - p2).norm(dim=-1, p=2)

        # Only pairs flagged valid (column 3) contribute.
        loss = (dist - target[:, 2]) * target[:, 3]
        loss = self.smooth_l1_loss(loss, beta=self.aux_l1_beta)
        loss = loss * w

        # BUGFIX: the caller (``loss``) unpacks two return values, matching
        # ``get_distance_aux_loss``; return the Markov term separately
        # instead of folding it into ``loss``.
        loss_dis_markv = loss.new_zeros(())
        if self.dis_markv:
            markv_c = torch.clamp(loss - eps, min=0)

            # Accumulate clamped residuals per line segment, both directions.
            seg_ends = (target[:, -1] == 0).nonzero().squeeze(-1)
            start = 0
            for end in seg_ends:
                forward = markv_c[start:end].cumsum(0)
                backward = markv_c[start:end].flip(0).cumsum(0).flip(0)
                markv_c[start:end] = (forward + backward) / 2
                start = end + 1
            loss_dis_markv = markv_c.sum() / max(len(markv_c), 1)

        loss = loss.sum() / max((loss > 0).sum(), 1)
        return loss, loss_dis_markv
    def get_distance_target(self,
                            target_map: torch.Tensor, 
                            ind: torch.Tensor,
                            MaxLinesK: int =1000):
        """Build distance-consistency targets for 1-point-per-voxel (2-value)
        regression.

        Args:
            target_map (torch.Tensor): (P, 2) GT regression values.
            ind (torch.Tensor): (P, 5) indices ``[batch, c, h, w, line_id]``.
            MaxLinesK (int): Stride for the unique (batch, line) key.

        Returns:
            torch.Tensor: (P-1, 5) targets: columns 0-3 are +-1 coefficients,
            column 4 the expected combined value.
        """
        # Unique key per (batch, line) so matches never span lines/batches.
        line_key = ind[:, 0] * MaxLinesK + ind[:, -1]

        seg_a, seg_b = target_map[:-1], target_map[1:]
        key_a, key_b = line_key[:-1], line_key[1:]
        target = seg_a.new_zeros(size=(seg_a.shape[0], 5))

        # Cross constraints: |r1 - r2| == 1 between consecutive voxels.
        target = self.get_cross(target, seg_a, seg_b, 1, 0)
        target = self.get_cross(target, seg_a, seg_b, 0, 1)

        # Additive constraints: r1 + r2 == 3.
        target = self.get_add(target, seg_a, seg_b, 0, 0)
        target = self.get_add(target, seg_a, seg_b, 1, 1)

        # Zero-out pairs that belong to different lines or batches.
        target[key_a != key_b, :] = 0
        return target

    def direction_loss(self, pred_line, target):
        # Placeholder — direction supervision is not implemented yet.
        pass
    @staticmethod
    def smooth_l1_loss(diff, beta):
        if beta < 1e-5:
            loss = torch.abs(diff)
        else:
            n = torch.abs(diff)
            loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)

        return loss
    def get_distance_aux_loss(self,
                            pred_line: torch.Tensor, 
                            target: torch.Tensor,
                            w = 1,
                            eps=0.1):
        """Auxiliary distance-consistency loss for 1-point-per-voxel
        regression.

        Args:
            pred_line (torch.Tensor): (P, 2) predicted regression values.
            target (torch.Tensor): (P-1, 5) from ``get_distance_target``:
                columns 0-3 are +-1 coefficients, column 4 the expected value.
            w (float): Loss weight. Default: 1.
            eps (float): Residual slack for the Markov accumulation.

        Returns:
            tuple[torch.Tensor, torch.Tensor]: ``(loss, loss_dis_markv)``;
            the second term is zero when ``self.dis_markv`` is disabled.
        """
        pred_s1, pred_s2 = pred_line[:-1], pred_line[1:]
        tmp = torch.cat((pred_s1, pred_s2), -1)
        # Linear constraint residual: sum(coeff * pred) - expected.
        loss = (tmp * target[:, :4]).sum(-1) - target[:, 4]
        loss = self.smooth_l1_loss(loss, beta=self.aux_l1_beta)
        loss = loss * w

        # BUGFIX: loss_dis_markv was referenced unconditionally below but
        # assigned only inside the `if self.dis_markv:` branch, raising
        # UnboundLocalError whenever dis_markv was disabled.
        loss_dis_markv = loss.new_zeros(())
        if self.dis_markv:
            markv_c = torch.clamp(loss - eps, min=0)

            # Accumulate clamped residuals per line segment, both directions.
            seg_ends = (target[:, -1] == 0).nonzero().squeeze(-1)
            start = 0
            for end in seg_ends:
                forward = markv_c[start:end].cumsum(0)
                backward = markv_c[start:end].flip(0).cumsum(0).flip(0)
                markv_c[start:end] = (forward + backward) / 2
                start = end + 1
            loss_dis_markv = markv_c.sum() / max(len(markv_c), 1)

        loss = loss.sum() / max((loss > 0).sum(), 1)
        return loss, loss_dis_markv

    # def get_double_lane(self, hotmaps):
    #     kernal = torch.ones(size=(3,3,2,1),device=hotmaps.device,dtype=hotmaps.detype)
        

    @staticmethod
    def assert_distance_aux_target(target: torch.Tensor,
                                    ind: torch.Tensor,
                                    target_map: torch.Tensor):
        for i in range(ind.shape[0]-1):
            idx1, idx2 = ind[i], ind[i + 1]
            line1, line2 = target_map[i], target_map[i + 1]
            if (idx1[0] != idx2[0]) or (idx1[4] != idx2[4]):
                assert target[i].any() == 0
            else:
                s1 = (target[i,:2] * line1).sum()
                s2 = (target[i, 2:4] * line2).sum()
                assert s1 + s2 == target[i, 4]

    def aug_bev_range(self,):
        """Randomly shrink the BEV x-range (train-time augmentation).

        Mutates ``self.grid_size``, ``self.pcr`` and ``self.crop`` in place.
        No-op unless ``train_cfg['crop_arfa']`` is configured.
        """
        if self.train_cfg.get('crop_arfa', None) is not None:
            crop_arfa = self.train_cfg.get('crop_arfa')
            # Output-resolution voxel size (input voxel size * stride).
            vs = np.array(self.train_cfg['voxel_size']) * self.train_cfg['out_size_factor']
            # feature_map_size = self.grid_size[:2] // self.train_cfg['out_size_factor']
            pcr = copy.deepcopy(self.train_cfg['point_cloud_range'])
            # Sample new x bounds as random fractions of the original bounds.
            xl = pcr[0] * np.random.uniform(crop_arfa[0][0], crop_arfa[0][1])
            xu = pcr[3] * np.random.uniform(crop_arfa[1][0], crop_arfa[1][1])
            # Snap both bounds down to a multiple of 8 output voxels.
            xl = (xl // (vs[0] * 8)) * 8 * vs[0]
            xu = (xu // (vs[0] * 8)) * 8 * vs[0]
            self.grid_size[0] = int(round((xu - xl) / self.train_cfg['voxel_size'][0]))
            self.pcr[0], self.pcr[0+3] = xl, xu
            # Feature-map crop window [x0, x1, y0, y1] in output-voxel units;
            # the y range is kept in full.
            crop = [0,0,0,0]
            crop[0] = round((xl - pcr[0]) / vs[0])
            crop[1] = round((xu - pcr[0]) / vs[0])
            crop[2] = 0
            crop[3] = round((pcr[4] - pcr[1]) / vs[1])
            self.crop = crop
    # def pred_aug_crop(self, preds_dicts):
    def loss(self, gt_maps_3d, preds_dicts, **kwargs):
        """Compute training losses for every map task.

        Args:
            gt_maps_3d: Per-sample ground-truth map points, consumed by
                ``self.get_targets`` (format: see ``get_targets_single``).
            preds_dicts (list): Output of the forward function — one entry
                per task, each a list of dicts holding at least 'heatmap'
                and 'reg' maps.

        Returns:
            dict[str, torch.Tensor]: Per-task heatmap / regression /
            auxiliary losses when ``self.task_specific``, otherwise a single
            summed 'loss' entry.
        """
        # Randomly shrink the BEV x-range (augmentation), then crop every
        # predicted map to the new window before building targets.
        self.aug_bev_range()
        for pred in preds_dicts:
            for p in pred:
                for k in p:
                    p[k] = p[k][:,:,self.crop[2]:self.crop[3],self.crop[0]:self.crop[1]]
        with torch.no_grad():
            heatmaps, inds, map_regs, double_lines,lane_type_heatmaps = self.get_targets(
                gt_maps_3d, preds_dicts)
        loss_dict = dict()
        # Loss weights / switches, all optional in train_cfg.
        code_weights = self.train_cfg.get('code_weights',[1.0, 1.0])
        markv_weights = self.train_cfg.get('markv_weights',0)
        aux_weights = self.train_cfg.get('aux_weights',1.0)
        cls_weights = self.train_cfg.get('cls_weights',1.0)
        reg_std_weights = self.train_cfg.get('reg_std_weights',1.0)
        is_markv = self.train_cfg.get('markv', False)
        lines_2_weights = self.train_cfg.get('lines_2_weights',1.0)
        lines_2 = self.train_cfg.get('lines_2',False)
        lines_2_mask = self.train_cfg.get('lines_2_mask',[False for i in range(len(preds_dicts))])
        lane_type = self.train_cfg.get('lane_type', False)
        lane_type_weights = self.train_cfg.get('lane_type_weights',1.0)
        lane_type_mask = self.train_cfg.get('lane_type_mask',[False for i in range(len(preds_dicts))])
        dis_markv_weight = self.train_cfg.get('dis_markv_weight', 1.0)
        if not self.task_specific:
            loss_dict['loss'] = 0
        for task_id, preds_dict in enumerate(preds_dicts):
            # prepare for loss
            _heatmaps = clip_sigmoid(preds_dict[0]['heatmap'])
            # NOTE(review): task 7 keeps 8 heatmap channels, all other tasks
            # keep 2 — presumably a task with more classes; confirm against
            # the task configuration.
            if task_id !=7:
                preds_dict[0]['heatmap'] = _heatmaps[:,:2]
            else:
                preds_dict[0]['heatmap'] = _heatmaps[:,:8]
            N,C,H,W = preds_dict[0]['heatmap'].shape
            ind = inds[task_id] # N c H W, l
            target_heatmap = preds_dict[0]['heatmap']

            # heatmap focal loss; average over the (distributed-mean) count
            # of positive pixels, clamped to at least 1.
            num_pos = heatmaps[task_id].eq(1).float().sum().item()
            cls_avg_factor = torch.clamp(
                reduce_mean(heatmaps[task_id].new_tensor(num_pos)),
                min=1).item()
            
            loss_heatmap = self.loss_cls(
                preds_dict[0]['heatmap'],
                heatmaps[task_id],
                avg_factor=cls_avg_factor)
            # pred is double line: channel 2 predicts "double line",
            # weighted by the GT heatmap so only line pixels contribute.
            if lines_2 and lines_2_mask[task_id]:
                _lines_2_weight = heatmaps[task_id].max(1)[0].unsqueeze(1)
                loss_doublelines = self.loss_cls(
                    _heatmaps[:,2:3]*_lines_2_weight,
                    double_lines[task_id],
                    avg_factor=max(_lines_2_weight.sum(), 1))
                loss_dict[f'task_map_{task_id}.loss_lines_num'] = loss_doublelines * lines_2_weights
            # pred line type eg: soild dash (channels 3:5)
            if lane_type and lane_type_mask[task_id]:
                loss_lane_type = self.loss_cls(
                    _heatmaps[:,3:5]*heatmaps[task_id],
                    lane_type_heatmaps[task_id],
                    avg_factor=num_pos,)
                loss_dict[f'task_map_{task_id}.loss_lane_type'] = loss_lane_type * lane_type_weights
            ## markv loss: supervise the Markov-smoothed heatmap as well
            if is_markv:
                if len(ind) > 0:
                    target_heatmap = self.get_markv_target(target_heatmap, ind)
                loss_markv = self.loss_cls(
                    target_heatmap,
                    heatmaps[task_id],
                    avg_factor=cls_avg_factor)
                loss_dict[f'task_map_{task_id}.loss_markv'] = loss_markv * markv_weights

            target_map = map_regs[task_id]
            target_map = target_map.unsqueeze(0)

            # Regression loss for r1 r2 (edge parameters clamped to [0, 4])
            pred = preds_dict[0]['reg'].permute(0, 2, 3, 1).contiguous()
            pred = pred.clamp(min=0.0, max=4.0)
            
            # tor
            N,C,H,W = preds_dict[0]['heatmap'].shape
            bbox_weights = torch.Tensor(code_weights).cuda()
            # pred = pred.view(N, H, W, -1, 2)
            C = 2
            num_reg = int(pred.shape[-1]/C)
            pred = pred.view(1, -1, num_reg)
            # No GT lines in the batch: gather a dummy index with zeroed
            # weights so the graph still yields a (zero) loss.
            if len(ind) == 0:
                ind = torch.zeros(size=(1,4),device=pred.device,dtype=torch.long)
                target_map = torch.zeros(size=(1,1,num_reg),device=pred.device,dtype=pred.dtype)
                bbox_weights = bbox_weights * 0
            # Flatten (batch, channel, y, x) indices into the flattened pred.
            ind_ = ind[:, 0] * H * W * C + \
                   ind[:, 2] * W * C + \
                   ind[:, 3] * C + \
                   ind[:, 1]
            ind_ = ind_.view(1, -1)


            # assert ind_.max() < pred.shape[1]
            pred = self._gather_feat(pred, ind_)
            if len(ind) == 0:
                pred[:] = 0
            # distance aux loss (num_reg picks the 1-point or 2-point variant)
            if self.distance_aux:
                if num_reg==2:
                    target = self.get_distance_target(target_map.clone().squeeze(0), ind)
                    # self.assert_distance_aux_target(target, ind, target_map.squeeze(0))
                    distance_aux_loss,distance_aux_loss_markv = self.get_distance_aux_loss(target=target,
                                                                pred_line=pred.clone().squeeze(0),
                                                                )
                else:
                    target = self.get_distance_target_4p(target_map.clone().squeeze(0), ind)
                    # self.assert_distance_aux_target(target, ind, target_map.squeeze(0))
                    distance_aux_loss,distance_aux_loss_markv = self.get_distance_aux_loss_4p(target=target,
                                                                pred_line=pred.clone().squeeze(0),
                                                                )
                loss_dict[f'task_map_{task_id}.loss_dis_aux'] = distance_aux_loss * aux_weights
                if self.dis_markv:
                    loss_dict[f'task_map_{task_id}.loss_dis_markv'] = distance_aux_loss_markv * dis_markv_weight

            loss_map = self.loss_map(
                        pred,
                        target_map,
                        bbox_weights,
                    avg_factor=(num_pos + 1e-4))
            if self.reg_std:
                # Penalize the variance of the regression residual.
                diff = (pred - target_map)
                diff = diff - diff.mean()
                diff = diff.pow(2)
                loss_std = diff.mean()
                # loss_map = (pred - target_map).std()
                loss_dict[f'task_map_{task_id}.reg_std'] = loss_std * reg_std_weights
            # if bbox_weights.sum() == 0:
            #     assert distance_aux_loss == 0
            #     assert loss_map==0
            # if loss_map > 10:
            #     print(1)
            #     import pdb; pdb.set_trace()
            if self.task_specific:
                loss_dict[f'task_map_{task_id}.loss_reg'] = loss_map
                loss_dict[f'task_map_{task_id}.loss_heatmap'] = loss_heatmap * cls_weights
            else:
                loss_dict['loss'] += loss_map
                loss_dict['loss'] += loss_heatmap
        return loss_dict

    def get_maps(self, preds_dicts, img_metas, img=None, rescale=False):
        """Decode map-head predictions into point / score / class results.

        Args:
            preds_dicts (tuple[list[dict]]): Prediction results.
            img_metas (list[dict]): Point cloud and image's meta info.
                Unused here; kept for interface compatibility.
            img: Unused. Default: None.
            rescale (bool): Unused. Default: False.

        Returns:
            list[list[dict]]: Decoded map points, scores and classes per
            task (see ``get_result``).
        """
        return self.get_result(preds_dicts)
    
    @staticmethod
    def decode_one_voxels(dt_pts):
        dx = (dt_pts <= 1) * 0 + \
             ((1 < dt_pts) & (dt_pts <= 2)) * (dt_pts - 1) + \
             ((2 < dt_pts) & (dt_pts <= 3)) * 1 + \
             (3 < dt_pts) * (4 - dt_pts)
        dy = (dt_pts <= 1) * dt_pts + \
             ((1 < dt_pts) & (dt_pts <= 2)) * 1 + \
             ((2 < dt_pts) & (dt_pts <= 3)) * (3 - dt_pts) + \
             (3 < dt_pts) * 0
        return dx, dy

    def decode_lines_py(self, dt_cls, dt_pts, vs, pcr, ppv=2, k_max=4096*2):
        """Decode per-voxel scores and edge-parameter regressions into line
        points in world coordinates (CUDA only — tensors are moved with
        ``.cuda()``).

        Args:
            dt_cls (torch.Tensor): (N, H, W, C') map; first 2 channels are
                class scores, the remainder are extra per-voxel features.
            dt_pts (torch.Tensor): Edge parameters in [0, 4] (see
                ``decode_one_voxels``); last dim indexes the two points.
            vs: Output voxel size.
            pcr: Point-cloud range (lower bounds at indices 0/1).
            ppv (int): Points per voxel. Default: 2.
            k_max (int): Top-scoring entries kept per sample. Default: 8192.

        Returns:
            tuple: ``(scores, lines_points, lines_cls, feat)`` for the top
            ``k_max`` entries per sample.
        """
        feat = dt_cls[...,2:]
        dt_cls = dt_cls[...,:2]
        dt_cls = dt_cls.contiguous() 
        feat = feat.contiguous() 
        N, H, W, C = dt_cls.shape

        xs = torch.arange(0, W, 1, dtype=torch.float32).cuda()
        ys = torch.arange(0, H, 1, dtype=torch.float32).cuda()

        # Voxel-grid origins in world units.
        xs = xs * vs[0] + pcr[0]
        ys = ys * vs[1] + pcr[1]
        ys, xs = torch.meshgrid([ys, xs])  # [x_grid, y_grid]
        xys = torch.stack((xs, ys), -1)

        # nxys = xys.unsqueeze(0).repeat(2,1,1,1)
        # NOTE(review): the second call unpacks as (dy, dx) — the axes appear
        # deliberately swapped for the second point; confirm against the
        # target encoding in the voxel-interpolation extension.
        d0x, d0y = self.decode_one_voxels(dt_pts[..., 0])
        d1y, d1x = self.decode_one_voxels(dt_pts[..., 1])

        dxy = torch.stack([d0x, d0y, d1x, d1y], -1)
        dxy = dxy.reshape(N, H, W, C, ppv, 2) 
        xys = xys.reshape(1, H, W, 1, 1, 2)

        # Scale unit-voxel offsets to world units, then add voxel origins.
        dxy *=  torch.tensor(vs[:2]).cuda() # bugfix
        dxy = dxy + xys
        dxy = dxy.view(N, -1, ppv, 2)
        # dxy *=  torch.tensor(vs[:2]).cuda()
        dt_cls = dt_cls.view(N, -1)
        # Duplicate the features so both points of a voxel share them.
        feat = feat.unsqueeze(3).repeat(1,1,1,2,1)
        feat = feat.view(N,-1, feat.shape[-1])

        class_num = C/ppv
        pts_cls = torch.arange(class_num).cuda().repeat(N, H, W, ppv).view(N, -1)

        # Keep the k_max highest-scoring entries per sample.
        scores, indices = dt_cls.topk(k_max, dim=1, largest=True, sorted=True)
        lines_points = dxy[torch.arange(N).view(N, -1).repeat(1, k_max), indices]
        lines_cls = pts_cls[torch.arange(N).view(N, -1).repeat(1, k_max), indices]
        feat = feat[torch.arange(N).view(N, -1).repeat(1, k_max), indices]
        return scores, lines_points, lines_cls, feat

    def get_result(self, preds_dicts):
        """Decode every task's predictions into thresholded map points.

        Args:
            preds_dicts (tuple[list[dict]]): Per-task prediction dicts with
                'heatmap' and 'reg' maps.

        Returns:
            list[list[dict]]: One dict per task with 'maps_points',
            'maps_scores' (score concatenated with the extra features) and
            'maps_class', wrapped in a single-element outer list.
        """
        # grid_size = torch.tensor(self.test_cfg['grid_size'])
        pcr = torch.tensor(self.test_cfg['point_cloud_range'])
        # Output-resolution voxel size (input voxel size * stride).
        vs = np.array(self.test_cfg['voxel_size']) * self.test_cfg['out_size_factor']
        results = []
        for task_id, preds_dict in enumerate(preds_dicts):
            # prepare for loss
            # preds_dict[0]['heatmap'] = torch.clamp(min=-3.0, max=3.0)
            map_cls = clip_sigmoid(preds_dict[0]['heatmap'])
            map_cls = map_cls.permute(0, 2, 3, 1).contiguous()
            map_reg = preds_dict[0]['reg'].permute(0, 2, 3, 1).contiguous()

            N, H, W, C = map_cls.shape
            map_reg = map_reg.view(N, H, W, 2, -1)

            # dt_cls = lane_utils.lane_detection_range_filter(dt_cls, self.lane_detection_range, vs, pcr)
            scores, lines_points, lines_cls, feat = self.decode_lines_py(map_cls, map_reg, vs, pcr)
            # scores = torch.cat([scores.unsqueeze(-1),feat],-1)
            # if task_id == 7:
            #     print(1)
            # Keep only points above the per-task score threshold.
            mask = scores > self.test_cfg['thr'][task_id]
            scores = torch.cat([scores[mask].unsqueeze(-1),feat[mask]],-1)
            result = {'maps_points': lines_points[mask],
                    'maps_scores': scores,
                    'maps_class': (lines_cls + task_id)[mask],}
            results.append(result)
        return [results]
