import cv2
import mayavi.mlab as mlab
import numpy as np
import torch

from expansion import depth_map_utils, vis_utils


def get_calib_from_file(calib_file):
    """Parse a KITTI-format calibration text file.

    :param calib_file: path to the calib .txt file (KITTI layout: P2 on
        line 2, P3 on line 3, R0 on line 4, Tr_velo_to_cam on line 5)
    :return: dict with 'P2', 'P3' (3x4 projection matrices), 'R0'
        (3x3 rectification matrix) and 'Tr_velo2cam' (3x4 transform)
    """
    with open(calib_file) as f:
        lines = f.readlines()

    def _parse(line_idx, shape):
        # split() (instead of split(' ')) tolerates repeated whitespace
        # between values; the first token is the "KEY:" label.
        vals = lines[line_idx].strip().split()[1:]
        return np.array(vals, dtype=np.float32).reshape(shape)

    return {'P2': _parse(2, (3, 4)),
            'P3': _parse(3, (3, 4)),
            'R0': _parse(4, (3, 3)),
            'Tr_velo2cam': _parse(5, (3, 4))}

class Calibration(object):
    """KITTI-style calibration wrapper.

    Holds the P2 camera projection matrix, the R0 rectification matrix
    and the velodyne-to-camera transform, and converts points between
    the lidar, rectified-camera and image coordinate frames.
    """

    def __init__(self, calib_file):
        # Accept either a path to a calib file or an already-parsed dict.
        if not isinstance(calib_file, dict):
            calib = get_calib_from_file(calib_file)
        else:
            calib = calib_file

        self.P2 = calib['P2']  # 3 x 4
        self.R0 = calib['R0']  # 3 x 3
        self.V2C = calib['Tr_velo2cam']  # 3 x 4

        # Camera intrinsics and extrinsics
        self.cu = self.P2[0, 2]  # principal point u (pixels)
        self.cv = self.P2[1, 2]  # principal point v (pixels)
        self.fu = self.P2[0, 0]  # focal length along u
        self.fv = self.P2[1, 1]  # focal length along v
        # Translation encoded in P2's fourth column (baseline terms).
        self.tx = self.P2[0, 3] / (-self.fu)
        self.ty = self.P2[1, 3] / (-self.fv)

    def cart_to_hom(self, pts):
        """
        Append a homogeneous 1-column to Cartesian points.
        :param pts: (N, 3 or 2)
        :return pts_hom: (N, 4 or 3)
        """
        pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
        return pts_hom

    def rect_to_lidar(self, pts_rect):
        """
        Inverse of lidar_to_rect.
        :param pts_rect: (N, 3) points in rectified camera coordinates
        :return pts_lidar: (N, 3) points in lidar coordinates
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)  # (N, 4)
        # Extend R0 and V2C to homogeneous 4x4 matrices so the combined
        # lidar->rect transform can be inverted in a single step.
        R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32)))  # (3, 4)
        R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        R0_ext[3, 3] = 1
        V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32)))  # (4, 4)
        V2C_ext[3, 3] = 1

        pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))
        return pts_lidar[:, 0:3]

    def lidar_to_rect(self, pts_lidar):
        """
        Transform lidar points to rectified camera coordinates.
        :param pts_lidar: (N, 3)
        :return pts_rect: (N, 3)
        """
        pts_lidar_hom = self.cart_to_hom(pts_lidar)
        # Row-vector convention: apply V2C first, then R0.
        pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
        # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
        return pts_rect

    def rect_to_img(self, pts_rect):
        """
        Project rectified-camera points onto the image plane.
        :param pts_rect: (N, 3)
        :return pts_img: (N, 2) pixel coordinates;
            pts_rect_depth: (N,) depth of each point in the rect frame
        """
        pts_rect_hom = self.cart_to_hom(pts_rect)
        pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
        pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T  # (N, 2)
        # self.P2.T[3, 2] is P2[2, 3]: subtract the projection's constant
        # z-offset to recover the point depth in the rect camera frame.
        pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2]  # depth in rect camera coord
        return pts_img, pts_rect_depth

    def lidar_to_img(self, pts_lidar):
        """
        Project lidar points directly onto the image plane.
        :param pts_lidar: (N, 3)
        :return pts_img: (N, 2); pts_depth: (N,) rect-frame depth
        """
        pts_rect = self.lidar_to_rect(pts_lidar)
        pts_img, pts_depth = self.rect_to_img(pts_rect)
        return pts_img, pts_depth

    def img_to_rect(self, u, v, depth_rect):
        """
        Back-project pixels with known depth into rect camera coordinates.
        :param u: (N) pixel column(s)
        :param v: (N) pixel row(s)
        :param depth_rect: (N) depth in the rect camera frame
        :return: pts_rect (N, 3)
        """
        x = ((u - self.cu) * depth_rect) / self.fu + self.tx
        y = ((v - self.cv) * depth_rect) / self.fv + self.ty
        pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
        return pts_rect

    def corners3d_to_img_boxes(self, corners3d):
        """
        :param corners3d: (N, 8, 3) corners in rect coordinate
        :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
        :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
        """
        sample_num = corners3d.shape[0]
        corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2)  # (N, 8, 4)

        img_pts = np.matmul(corners3d_hom, self.P2.T)  # (N, 8, 3)

        # Perspective divide, then take the axis-aligned extent of the
        # 8 projected corners as the 2D box.
        x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
        x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
        x2, y2 = np.max(x, axis=1), np.max(y, axis=1)

        boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
        boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)

        return boxes, boxes_corner

def get_calib(idx):
    """Build a Calibration from the calib file path given by *idx*."""
    # *idx* is used directly as the path to the calib .txt file.
    return Calibration(idx)


def cls_type_to_id(cls_type):
    """Map a KITTI class name to its integer id.

    :param cls_type: class name string ('Car', 'Pedestrian', ...)
    :return: integer id, or -1 for unknown class names
    """
    type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
    # dict.get avoids the redundant membership test before the lookup.
    return type_to_id.get(cls_type, -1)

class Object3d(object):
    """A single KITTI label entry parsed from one text line."""

    # Class-name -> integer id mapping; unknown names map to -1.
    _TYPE_TO_ID = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}

    def __init__(self, line):
        fields = line.strip().split(' ')
        self.src = line
        self.cls_type = fields[0]
        self.cls_id = self._TYPE_TO_ID.get(self.cls_type, -1)
        self.truncation = float(fields[1])
        self.occlusion = float(fields[2])  # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
        self.alpha = float(fields[3])
        # 2D box: x1, y1, x2, y2 in image pixels.
        self.box2d = np.array([float(v) for v in fields[4:8]], dtype=np.float32)
        self.h = float(fields[8])
        self.w = float(fields[9])
        self.l = float(fields[10])
        # Box bottom-center location in camera coordinates.
        self.loc = np.array([float(v) for v in fields[11:14]], dtype=np.float32)
        self.dis_to_cam = np.linalg.norm(self.loc)
        self.ry = float(fields[14])
        # Detection results carry a 16th field with the score.
        self.score = float(fields[15]) if len(fields) == 16 else -1.0
        self.level_str = None
        self.level = self.get_kitti_obj_level()

    def get_kitti_obj_level(self):
        """Derive the KITTI difficulty level from 2D box height,
        truncation and occlusion; also sets self.level_str."""
        height = float(self.box2d[3]) - float(self.box2d[1]) + 1

        # (label, level, min box height, max truncation, max occlusion)
        criteria = (('Easy', 0, 40, 0.15, 0),
                    ('Moderate', 1, 25, 0.3, 1),
                    ('Hard', 2, 25, 0.5, 2))
        for label, level, min_h, max_trunc, max_occ in criteria:
            if height >= min_h and self.truncation <= max_trunc and self.occlusion <= max_occ:
                self.level_str = label
                return level
        self.level_str = 'UnKnown'
        return -1

    def generate_corners3d(self):
        """
        generate corners3d representation for this object
        :return corners_3d: (8, 3) corners of box3d in camera coord
        """
        half_l, half_w = self.l / 2, self.w / 2
        # Object-frame corners: bottom face (y=0) first, then top (y=-h).
        x_corners = [half_l, half_l, -half_l, -half_l, half_l, half_l, -half_l, -half_l]
        y_corners = [0, 0, 0, 0, -self.h, -self.h, -self.h, -self.h]
        z_corners = [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w]

        # Rotation about the camera y-axis by heading ry.
        rot = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
                        [0, 1, 0],
                        [-np.sin(self.ry), 0, np.cos(self.ry)]])
        local = np.vstack([x_corners, y_corners, z_corners])  # (3, 8)
        return np.dot(rot, local).T + self.loc

    def to_str(self):
        """Human-readable summary of this label."""
        return '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
               % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d,
                  self.h, self.w, self.l, self.loc, self.ry)

    def to_kitti_format(self):
        """Serialize back to a KITTI label line (score omitted)."""
        values = (self.cls_type, self.truncation, int(self.occlusion), self.alpha,
                  self.box2d[0], self.box2d[1], self.box2d[2], self.box2d[3],
                  self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
        return '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' % values

def get_objects_from_label(label_file):
    """Read a KITTI label file and return one Object3d per line."""
    with open(label_file, 'r') as f:
        return [Object3d(label_line) for label_line in f.readlines()]

def get_label(idx):
    """Load the Object3d labels from the label file path given by *idx*."""
    # *idx* is used directly as the path to the label .txt file.
    return get_objects_from_label(idx)


def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of an axis-aligned cell at z=0 onto *fig*."""
    edges = (([x1, x1], [y1, y2]),   # left
             ([x2, x2], [y1, y2]),   # right
             ([x1, x2], [y1, y1]),   # bottom
             ([x1, x2], [y2, y2]))   # top
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius,
                    line_width=1, figure=fig)
    return fig

def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with grid_size x grid_size cells."""
    x_min, y_min, x_max, y_max = bv_range
    for gx in range(x_min, x_max, grid_size):
        for gy in range(y_min, y_max, grid_size):
            fig = draw_grid(gx, gy, gx + grid_size, gy + grid_size, fig)

    return fig


def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Render a point cloud with mayavi; creates a figure if *fig* is None."""
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()  # tolerate torch tensors
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)

    xs, ys, zs = pts[:, 0], pts[:, 1], pts[:, 2]
    if show_intensity:
        # Color points by the 4th channel (intensity).
        print(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3])
        G = mlab.points3d(xs, ys, zs, pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(xs, ys, zs, mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)

    if draw_origin:
        # White cube at the origin, plus blue/green/red x/y/z axis stubs.
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        axes = (([0, 3], [0, 0], [0, 0], (0, 0, 1)),
                ([0, 0], [0, 3], [0, 0], (0, 1, 0)),
                ([0, 0], [0, 0], [0, 3], (1, 0, 0)))
        for ax, ay, az, c in axes:
            mlab.plot3d(ax, ay, az, color=c, tube_radius=0.1)

    return fig

def check_numpy_to_torch(x):
    """Return (tensor, was_numpy): ndarrays become float32 tensors,
    anything else is passed through unchanged."""
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True

def rotate_points_along_z(points, angle):
    """
    Rotate points about the z-axis (batched).
    Args:
        points: (B, N, 3 + C)
        angle: (B), radians; positive angle rotates x toward y
    Returns:
        rotated points, same container type as the input
        (numpy in -> numpy out, tensor in -> tensor out)
    """
    # Inline numpy->torch conversion (float32), remembering the input type.
    is_numpy = isinstance(points, np.ndarray)
    if is_numpy:
        points = torch.from_numpy(points).float()
    if isinstance(angle, np.ndarray):
        angle = torch.from_numpy(angle).float()

    cosa, sina = torch.cos(angle), torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # One row-major 3x3 rotation matrix per batch element.
    rot = torch.stack((
        cosa,  sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones,
    ), dim=1).view(-1, 3, 3).float()
    xyz_rot = torch.matmul(points[:, :, 0:3], rot)
    # Extra channels beyond xyz are carried through untouched.
    out = torch.cat((xyz_rot, points[:, :, 3:]), dim=-1)
    return out.numpy() if is_numpy else out

def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        corners: (N, 8, 3), same container type as the input
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)

    # Unit-cube corner signs matching the ASCII diagram above.
    signs = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    # Scale signs by each box's dx/dy/dz, rotate by heading, then shift
    # to the box center.
    dims = boxes3d[:, None, 3:6].repeat(1, 8, 1)
    corners = dims * signs[None, :, :]
    corners = rotate_points_along_z(corners.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners += boxes3d[:, None, 0:3]

    return corners.numpy() if is_numpy else corners

def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    Draw up to *max_num* 3D boxes (as wireframes) into a mayavi figure.
    :param corners3d: (N, 8, 3)
    :param fig: mayavi figure to draw into
    :param color: edge color
    :param line_width: edge width
    :param cls: optional per-box labels (floats if ndarray, else strings)
    :param tag: unused
    :param max_num: cap on the number of boxes drawn
    :param tube_radius: passed through to mlab.plot3d
    :return: fig
    """
    import mayavi.mlab as mlab

    def _edge(b, i, j):
        # One line segment between corners i and j of box b.
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]],
                    color=color, tube_radius=tube_radius, line_width=line_width, figure=fig)

    for n in range(min(max_num, len(corners3d))):
        b = corners3d[n]  # (8, 3)

        if cls is not None:
            fmt = '%.2f' if isinstance(cls, np.ndarray) else '%s'
            mlab.text3d(b[6, 0], b[6, 1], b[6, 2], fmt % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)

        for k in range(0, 4):
            _edge(b, k, (k + 1) % 4)          # bottom face
            _edge(b, k + 4, (k + 1) % 4 + 4)  # top face
            _edge(b, k, k + 4)                # vertical edge

        # Diagonal cross marking the box front face.
        _edge(b, 0, 5)
        _edge(b, 1, 4)

    return fig

def get_fov_flag(pts_rect, img_shape, calib):
    """
    Mask rectified-camera points that project inside the image.

    Args:
        pts_rect: (N, 3) points in rect camera coordinates
        img_shape: (H, W) image size
        calib: object providing rect_to_img()

    Returns:
        (N,) boolean mask: inside the image and with depth >= 0
    """
    pts_img, pts_depth = calib.rect_to_img(pts_rect)
    in_cols = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
    in_rows = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
    in_image = np.logical_and(in_cols, in_rows)
    return np.logical_and(in_image, pts_depth >= 0)

# def corners3d_of_camera(corners):
#     x_list=np.ones(3)*0.25
#     y_list = np.ones(3)*(-0.133)
#     z_list = np.ones(3)*(-0.23)
#     T_list = np.arrary([[np.zeros,-np.ones,np.zeros,x_list],
#                         [np.zeros,np.zeros,-np.ones,y_list],
#                         [np.ones,np.zeros,np.zeros,z_list]])
#     cor_cam=

# def visualize_image_3dbbox(img,calib,bbox3d):
#     corners3d = corners3d_of_camera(bbox3d) #(N,8,3)
#     # Project 3D bb into image plane
#     img_boxes, _ = calib.corners3d_to_img_boxes(corners3d)
#
#     img_boxes[:, 0] = np.clip(img_boxes[:, 0], 0, img_shape[1] - 1)
#     img_boxes[:, 1] = np.clip(img_boxes[:, 1], 0, img_shape[0] - 1)
#     img_boxes[:, 2] = np.clip(img_boxes[:, 2], 0, img_shape[1] - 1)
#     img_boxes[:, 3] = np.clip(img_boxes[:, 3], 0, img_shape[0] - 1)
#
#     img_boxes_w = img_boxes[:, 2] - img_boxes[:, 0]
#     img_boxes_h = img_boxes[:, 3] - img_boxes[:, 1]
#     box_valid_mask = np.logical_and(img_boxes_w < img_shape[1] * 0.8, img_boxes_h < img_shape[0] * 0.8)
#depth_com_pointox3d.shape[0]):
#
#         if box_valid_mask[k] == 0:
#             continue
#         corners2d = kitti_utils.project_to_image(corners3d[k], calib.P2)
#         colors = get_color()
#         img2 = kitti_utils.draw_projected_box3d(img2, corners2d,colors[k])
#     self.image_pub.publish(self.bridge.cv2_to_imgmsg(img2, "bgr8"))







# ---------------------------------------------------------------------------
# Demo script: load one frame (lidar scan, calibration, labels, image),
# build a sparse depth map from the projected lidar points, densify it
# inside each 2D ground-truth box, and visualize the completed point cloud
# together with the 3D ground-truth boxes in mayavi.
# NOTE(review): all dataset paths and the 480x640 image size are
# hard-coded -- confirm they match the installed dataset.
# ---------------------------------------------------------------------------
frame = "000154"
dataset_name = "0211"

# Lidar scan: (N, 4) = x, y, z, intensity; drop points below z = -0.72 m
# (presumably ground removal -- TODO confirm threshold).
points = np.fromfile("/home/st/ubuntu_data/dataset/gazebo/"+dataset_name+"/training/velodyne/"+frame+".bin", dtype=np.float32).reshape(-1, 4)
points = points[points[:, 2] > -0.72]
print("my debug: ", points.shape)


#calib
calib = get_calib("/home/st/ubuntu_data/dataset/gazebo/"+dataset_name+"/training/calib/"+frame+".txt")

#calib

#3dbbox
obj_list = get_label("/home/st/ubuntu_data/dataset/gazebo/"+dataset_name+"/training/label_2/"+frame+".txt")

#depth completion
# Project the lidar points into the image and keep only those inside the
# assumed 480x640 field of view with non-negative depth.
pts_rect = calib.lidar_to_rect(points[:, 0:3])
fov_flag  = get_fov_flag(pts_rect, np.array([480, 640]), calib)
pts_fov = pts_rect[fov_flag]
pts_image, depth = calib.rect_to_img(pts_fov)
print("my debug: ", pts_image.shape, depth.shape)
# Sparse depth map: depth at each projected pixel, 0 elsewhere.
depth_img = np.zeros((480,640,1), np.float32)
for index in range(pts_image.shape[0]):
    depth_img[int(pts_image[index][1])][int(pts_image[index][0])] = depth[index]

# mask = cv2.imread("/home/st/ubuntu_data/NRSL/depth_completion/history/depthComp/Examples/my_seg.png")
# cv2.imread("seg", mask)
# mask = mask[mask == 1]
# depth_img = depth_img[mask]
# depth_image = (depth_img * 256).astype(np.uint16)
# cv2.imwrite("/home/st/ubuntu_data/NRSL/depth_completion/history/depthComp/Examples/my_depth.png", depth_img)

# cv2.imshow("depth raw", depth_img)
# final_depths = depth_map_utils.fill_in_fast(
#                 depth_img, extrapolate=True, blur_type='gaussian')
# depth_image = (final_depths * 256).astype(np.uint16)
# cv2.imshow("depth completion", depth_img)
# cv2.waitKey(0)

# depth_com_point = calib.img_to_rect(0, 0, depth_img[0][0])
# print("my debug ",depth_img.shape)
# for v in range(depth_img.shape[0]):
#     for u in range(depth_img.shape[1]):
#         if u+v==0 or  depth_img[v][u] == 0:
#             continue
#         depth_com_point = np.concatenate((depth_com_point, calib.img_to_rect(u, v, final_depths[v][u])), axis=0)

# print("my debug:", depth_com_point.shape)
# depth completion

#2dbbox
image=cv2.imread("/home/st/ubuntu_data/dataset/gazebo/"+dataset_name+"/training/image_2/"+frame+".png");
image_bbox=np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)

 #depth completion
# Seed the completed-point list with the (0, 0) pixel so np.concatenate
# below always has an array to append to.
depth_com_point = calib.img_to_rect(0, 0, depth_img[0][0])

for i in range(image_bbox.shape[0]):
    x1 = int(image_bbox[i][0])
    y1 = int(image_bbox[i][1])
    x2 = int(image_bbox[i][2])
    y2 = int(image_bbox[i][3])
    # NOTE(review): cv2.rectangle is given float32 coordinates and the
    # scalar `2` as the color argument -- newer OpenCV releases require
    # integer point tuples; verify against the installed cv2 version.
    image=cv2.rectangle(image,(image_bbox[i][0],image_bbox[i][1]),(image_bbox[i][2],image_bbox[i][3]),2)
    roi=depth_img[y1:y2, x1:x2]
    # print("my debug", roi.shape)
    # Densify the sparse depth only inside this 2D box.
    final_depths = depth_map_utils.fill_in_fast(
                    roi, extrapolate=False, blur_type='gaussian')
    # print("my debug ",depth_img.shape)
    # NOTE(review): int(image_bbox[i][1]) equals y1 (two spellings of the
    # same bound), and the ranges stop at y2-2 / x2-2, skipping the last
    # row/column of the ROI -- confirm this off-by-one is intended.
    for v in range(int(image_bbox[i][1]), (y2 - 1)):
        for u in range(int(image_bbox[i][0]), (x2 - 1)):
            if u+v==0 :
                continue
            # print("my debug", y2 - 2 - y1, "   ", u - x1)
            # Back-project each completed depth pixel into the rect frame.
            depth_com_point = np.concatenate((depth_com_point, calib.img_to_rect(u, v, final_depths[v - y1][u - x1])), axis=0)

    # print("my debug:", depth_com_point.shape)
    #depth completion



# image=cv2.circle(image,(262,239),2,(255,0,0),2)
# image=cv2.circle(image,(262,328),2,(255,0,0),2)
# image=cv2.circle(image,(281,299),2,(255,0,0),2)
# image=cv2.circle(image,(281,239),2,(255,0,0),2)
# image=cv2.circle(image,(378,239),2,(255,0,0),2)
# image=cv2.circle(image,(378,328),2,(255,0,0),2)
# image=cv2.circle(image,(359,299),2,(255,0,0),2)
# image=cv2.circle(image,(359,239),2,(255,0,0),2)
#for u in range(final_depths.shape[0]):
#     for v in range(final_depths.shape[1]):
#         if u+v==0 or  depth_img[u][v] == 0:
#             continue
#         cv2.circle(image, (v, u), 2, [255, 0, 0],1)
# image=cv2.circle(image,(124,322),2,(255,0,0),2)
# image=cv2.circle(image,(174,301),2,(255,0,0),2)
# image=cv2.circle(image,(174,239),2,(255,0,0),2)

cv2.imshow("2dbbox",image)
# Overlay every non-empty depth pixel on the image (full 480x640 scan;
# slow but only for visualization). u is the row, v the column here.
for u in range(depth_img.shape[0]):
    for v in range(depth_img.shape[1]):
        if u+v==0 or  depth_img[u][v] == 0:
            continue
        cv2.circle(image, (v, u), 2, [255, 0, 0],1)

cv2.imshow("depth",image)




# Collect KITTI-style annotation arrays from the parsed labels.
annotations = {}
annotations['name'] = np.array([obj.cls_type for obj in obj_list])
annotations['truncated'] = np.array([obj.truncation for obj in obj_list])
annotations['occluded'] = np.array([obj.occlusion for obj in obj_list])
annotations['alpha'] = np.array([obj.alpha for obj in obj_list])
annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0)
annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list])  # lhw(camera) format
annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0)
annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])
annotations['score'] = np.array([obj.score for obj in obj_list])
annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32)

# Real objects come first in KITTI label files; DontCare entries get -1.
num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)

# Ground-truth boxes in the lidar frame: (x, y, z, l, w, h, heading).
# z is lifted by h/2 so it marks the box center, and the camera yaw ry
# is converted to lidar-frame heading via -(pi/2 + ry).
loc = annotations['location'][:num_objects]
dims = annotations['dimensions'][:num_objects]
rots = annotations['rotation_y'][:num_objects]
loc_lidar = calib.rect_to_lidar(loc)
l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
loc_lidar[:, 2] += h[:, 0] / 2
gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1)
#3dbbox



# fig=visualize_pts(points,show_intensity=False)

# Convert the completed points back to the lidar frame and append a dummy
# intensity of 1 so the array matches the (N, 4) .bin point layout.
depth_com_point = calib.rect_to_lidar(depth_com_point).astype(np.float32)
# # print("my debug depth_com_point", depth_com_point.dtype)
depth_com_point = np.hstack((depth_com_point, np.ones((depth_com_point.shape[0], 1), dtype=np.float32)))
# # print("my debug depth_add_intensity", depth_com_point.shape)
depth_com_point.tofile("depth_com_points.bin")

# writer = open("depth_com_points.bin","wb")
# writer.write(depth_com_point)
# writer.close()
# depth_com_point = np.fromfile("depth_com_points.bin", dtype=np.float32).reshape(-1, 4)

# Final 3D visualization: completed points, BEV grid, GT boxes in blue.
fig=visualize_pts(depth_com_point, show_intensity=False)
fig =draw_multi_grid_range(fig)
corners3d = boxes_to_corners_3d(gt_boxes_lidar)
fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)


mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
mlab.show(stop=True)
