# Copyright (c) Phigent Robotics. All rights reserved.
import torch
import torch.nn.functional as F
from mmcv.runner import force_fp32

from mmdet3d.ops.bev_pool_v2.bev_pool import TRTBEVPoolv2
from mmdet.models import DETECTORS
from .. import builder
from .bevdet_fusion_4d import BEVDet4d_Fusion
from torch.cuda.amp import autocast
from typing import Tuple, List, Dict
from mmdet3d.ops.voxel_interpolation import voxel_interpolation_cpu as VI
import numpy as np
import torch
import torch.nn as nn

@DETECTORS.register_module()
class BEVDet4d_test(BEVDet4d_Fusion):
    """Debug/test variant of ``BEVDet4d_Fusion``.

    Replaces the image feature extractor with a single 1x1 conv fusion
    stage and, in ``forward_train``, rasterizes map-line and box ground
    truth into dense BEV tensors that are prepended to ``img_inputs``
    before delegating to the parent's training path.
    """

    def __init__(self, **kwargs):
        super(BEVDet4d_test, self).__init__(**kwargs)
        # 1x1 conv + BN + ReLU fusing the concatenated 17-channel input
        # down to 16 channels (see extract_img_feat).
        self.conv = nn.Sequential(
            nn.Conv2d(17, 16, kernel_size=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
        )

    def in_range(self, gt_dense_pts, pts_range):
        """Return a boolean mask of points lying inside the BEV range.

        Column 1 is tested against ``pts_range[0]``/``pts_range[3]`` and
        column 2 against ``pts_range[1]``/``pts_range[4]`` — consistent
        with a ``[xmin, ymin, zmin, xmax, ymax, zmax]`` range where
        columns 1/2 hold x/y; TODO(review) confirm against the caller.

        Args:
            gt_dense_pts (np.ndarray): (N, C) points; column 0 appears to
                be a per-line id (see outlier_gt_filter).
            pts_range (sequence): 6-element point-cloud range.

        Returns:
            np.ndarray: boolean mask of shape (N,).
        """
        pts_mask = np.logical_and(gt_dense_pts[:, 1] >= pts_range[0],
                                  gt_dense_pts[:, 1] <= pts_range[3])
        pts_mask = np.logical_and(pts_mask, gt_dense_pts[:, 2] <= pts_range[4])
        pts_mask = np.logical_and(pts_mask, gt_dense_pts[:, 2] >= pts_range[1])

        return pts_mask

    def outlier_gt_filter(self, gt_dense_pts, pts_range):
        """Drop lines fully outside ``pts_range`` and re-number line ids.

        Lines with at least one in-range point are kept in full.  For a
        closed loop that is partially out of range but whose first point
        is in range, the point sequence is rotated to start at the first
        out-of-range point, so the in-range portion stays contiguous.

        Args:
            gt_dense_pts (np.ndarray): (N, C) float points, column 0 = line id.
            pts_range (sequence): 6-element point-cloud range.

        Returns:
            np.ndarray: filtered (M, C) float32 array, M <= N.
        """
        lines_num = int(np.max(gt_dense_pts[:, 0]) + 1)

        pts_mask = self.in_range(gt_dense_pts, pts_range)
        new_gt_dense_pts = np.zeros(gt_dense_pts.shape, dtype=np.float32)

        id_count = 0
        filled_size = 0
        for i in range(lines_num):
            pos_mask = (gt_dense_pts[:, 0] == i)
            # Skip lines with no in-range point at all.
            if (pts_mask[pos_mask].sum() == 0):
                continue

            lines_gt = gt_dense_pts[pos_mask, :]
            lines_gt[:, 0] = id_count  # compact re-numbering of kept lines
            id_count += 1

            # A line is a closed loop when its endpoints (nearly) coincide.
            circle = (np.abs(lines_gt[0, 1] - lines_gt[-1, 1])**2 + np.abs(lines_gt[0, 2] - lines_gt[-1, 2])**2) < 1
            if(circle and
              (pts_mask[pos_mask].sum() != pos_mask.sum()) and
              self.in_range(lines_gt[0, :].reshape(1, -1), pts_range)[0]):
                # Closed loop AND partially out of range AND start point
                # in range: rotate so the sequence starts at the first
                # out-of-range point (dropping the duplicated closing
                # point and re-closing the loop at the new start).
                first_outside = np.where(~pts_mask[pos_mask])[0][0]
                lines_gt = np.concatenate([lines_gt[first_outside: -1, :],
                                           lines_gt[:first_outside, :],
                                           lines_gt[first_outside, :].reshape(1, -1)])

            new_gt_dense_pts[filled_size: filled_size + len(lines_gt), :] = lines_gt
            filled_size += len(lines_gt)

        new_gt_dense_pts = new_gt_dense_pts[:filled_size]
        return new_gt_dense_pts

    def get_targets_single(self, gt_maps_3d, k_max=1e5):
        """Densify one sample's map-line ground truth on the BEV grid.

        Filters out-of-range lines, then calls the CPU voxel
        interpolation op to produce densified (max ``max_objs``) points
        with 8 attributes each.

        Args:
            gt_maps_3d (torch.Tensor): raw map-line points for one sample.
            k_max (float): fallback cap on densified points when
                ``train_cfg['maps']['max_objs']`` is absent.

        Returns:
            np.ndarray: (size, 8) float32 densified points.
        """
        max_objs = int(self.train_cfg['maps'].get('max_objs', k_max))
        # Effective voxel size at the output stride.
        vs = np.array(self.train_cfg['maps']['voxel_size']) * self.train_cfg['maps']['out_size_factor']
        pcr = self.train_cfg['maps']['point_cloud_range']
        gt_pts = gt_maps_3d.cpu().numpy()
        gt_pts = gt_pts.astype(np.float32)
        gt_pts = self.outlier_gt_filter(gt_pts, pcr)
        dense_pts = np.zeros((max_objs, 8), dtype=np.float32)

        # The op fills dense_pts in place and returns the number of
        # valid rows.
        size = VI.gt_lines_voxel_interpolation(gt_pts, vs, pcr,
                                            max_objs, dense_pts)
        dense_pts = dense_pts[:size, :]
        return dense_pts

    def extract_img_feat(self, img, **kwargs):
        """Fuse the first image-input group with a 1x1 conv.

        Concatenates ``img[0]`` along the channel dim (expected to total
        17 channels — TODO confirm against the caller) and applies
        ``self.conv``.

        Returns:
            tuple: ([fused feature], [None]) matching the parent's
            (features, depth) convention.
        """
        feat = torch.cat(img[0], 1)
        img_feats = self.conv(feat)
        return [img_feats], [None]

    def forward_train(self, **kwargs):
        """Build dense map/box targets, then run the parent training step.

        Expects ``gt_bboxes_3d``, ``gt_labels_3d``, ``gt_maps_3d`` and
        ``img_inputs`` in ``kwargs``; prepends ``[boxes, maps, coord]``
        to ``img_inputs`` before delegating.

        Returns:
            dict: losses from the parent ``forward_train``.
        """
        gt_boxes_3d = kwargs['gt_bboxes_3d']
        gt_labels_3d = kwargs['gt_labels_3d']
        gt_maps_3d = kwargs['gt_maps_3d']

        maps = []
        coord = []
        for gt_maps in gt_maps_3d:
            # (1, H, W, num_cls, 2) regression target grid; `map_grid`
            # avoids shadowing the builtin `map`.
            map_grid = torch.zeros(1, 128, 128, 3, 2).cuda()
            maps_dense = self.get_targets_single(gt_maps)
            # rand() < 1 keeps every point; placeholder for a sampling ratio.
            mask = np.random.rand(maps_dense.shape[0]) < 1
            maps_dense = maps_dense[mask]
            maps_dense = torch.from_numpy(maps_dense).cuda()

            # Column layout (see get_targets_single): 2,3 = grid x,y;
            # 5,6 = regression pair; 7 = class id — TODO confirm.
            x, y = maps_dense[:, 2], maps_dense[:, 3]
            reg = maps_dense[:, [5, 6]]
            cls = maps_dense[:, 7]
            map_grid = map_grid.view(-1, 2)
            # Flatten (y, x, cls) into a row index of the (H*W*3, 2) view.
            idx = y * 128 * 3 + x * 3 + cls
            idx = idx.long()
            map_grid[idx] = reg
            maps.append(map_grid.view(1, 128, 128, -1))

            # BEV xy anchor coordinates; `align_anchor` is assumed to be
            # provided by the parent class — TODO confirm.
            coor = self.align_anchor[..., :2].unsqueeze(0).permute(0, 3, 1, 2)
            coord.append(coor)
        coord = torch.cat(coord, 0)
        maps = torch.cat(maps, 0)

        boxes = []
        grid_size = torch.tensor(self.train_cfg['pts']['grid_size'])
        pc_range = torch.tensor(self.train_cfg['pts']['point_cloud_range'])
        voxel_size = torch.tensor(self.train_cfg['pts']['voxel_size'])
        feature_map_size = grid_size[:2] // self.train_cfg['pts']['out_size_factor']
        for task_boxes, labels in zip(gt_boxes_3d, gt_labels_3d):
            task_boxes = task_boxes.tensor
            # Per-sample (1, H, W, 9) target: sub-voxel offset (2), z,
            # dims (3), sin/cos(rot), class id.
            box = torch.zeros(1, 128, 128, 9).cuda()
            num_objs = task_boxes.shape[0]
            for idx in range(num_objs):
                cls_id = labels[idx]

                # Be really careful with the coordinate system of the
                # box annotation.
                x, y, z = task_boxes[idx][0], task_boxes[idx][
                        1], task_boxes[idx][2]

                coor_x = (
                    x - pc_range[0]
                ) / voxel_size[0] / self.train_cfg['pts']['out_size_factor']
                coor_y = (
                    y - pc_range[1]
                ) / voxel_size[1] / self.train_cfg['pts']['out_size_factor']

                center = torch.tensor([coor_x, coor_y],
                                        dtype=torch.float32,
                                        device=box.device)
                center_int = center.to(torch.int32)
                x, y = center_int[0], center_int[1]
                # Throw out objects not in range to avoid indexing
                # outside the feature map.
                if not (0 <= center_int[0] < feature_map_size[0]
                        and 0 <= center_int[1] < feature_map_size[1]):
                    continue
                rot = task_boxes[idx][6].cuda()
                box_dim = task_boxes[idx][3:6].cuda()
                box[0][center_int[1], center_int[0]] = torch.cat([
                            center - torch.tensor([x, y], device=box.device),
                            z.unsqueeze(0).cuda(), box_dim,
                            torch.sin(rot).unsqueeze(0),
                            torch.cos(rot).unsqueeze(0), cls_id.unsqueeze(0)])
            boxes.append(box)
        boxes = torch.cat(boxes, 0)
        maps = maps.permute(0, 3, 1, 2).contiguous()
        boxes = boxes.permute(0, 3, 1, 2).contiguous()
        kwargs['img_inputs'].insert(0, [boxes, maps, coord])
        # BUG FIX: super() already binds self; the original
        # `super().forward_train(self, **kwargs)` passed self a second
        # time as the parent's first positional argument.
        losses = super().forward_train(**kwargs)
        return losses