# pip install nuscenes-devkit               # requires the nuscenes-devkit package
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion
import numpy as np
import cv2
import os
import copy
import math
import matplotlib.pyplot as plt

# Palette of visually distinct RGB triplets; indexed modulo its length to give
# each box attribute/category a stable color (see draw_boxes2img).
color_mapping = np.asarray([
    [0, 0, 0],
    [255, 179, 0],
    [128, 62, 117],
    [255, 104, 0],
    [166, 189, 215],
    [193, 0, 32],
    [206, 162, 98],
    [129, 112, 102],
    [0, 125, 52],
    [246, 118, 142],
    [0, 83, 138],
    [255, 122, 92],
    [83, 55, 122],
    [255, 142, 0],
    [179, 40, 81],
    [244, 200, 0],
    [127, 24, 13],
    [147, 170, 0],
    [89, 51, 21],
    [241, 58, 19],
    [35, 44, 22],
    [112, 224, 255],
    [70, 184, 160],
    [153, 0, 255],
    [71, 255, 0],
    [255, 0, 163],
    [255, 204, 0],
    [0, 255, 235],
    [255, 0, 235],
    [255, 0, 122],
    [255, 245, 0],
    [10, 190, 212],
    [214, 255, 0],
    [0, 204, 255],
    [20, 0, 255],
    [255, 255, 0],
    [0, 153, 255],
    [0, 255, 204],
    [41, 255, 0],
    [173, 0, 255],
    [0, 245, 255],
    [71, 0, 255],
    [0, 255, 184],
    [0, 92, 255],
    [184, 255, 0],
    [255, 214, 0],
    [25, 194, 194],
    [92, 0, 255],
    [220, 220, 220],
    [255, 9, 92],
    [112, 9, 255],
    [8, 255, 214],
    [255, 184, 6],
    [10, 255, 71],
    [255, 41, 10],
    [7, 255, 255],
    [224, 255, 8],
    [102, 8, 255],
    [255, 61, 6],
    [255, 194, 7],
    [0, 255, 20],
    [255, 8, 41],
    [255, 5, 153],
    [6, 51, 255],
    [235, 12, 255],
    [160, 150, 20],
    [0, 163, 255],
    [140, 140, 140],
    [250, 10, 15],
    [20, 255, 0],
])

def inv_rt(rt):
    """Invert a 4x4 rigid transform [R | t] analytically: [R^-1 | -R^-1 t].

    Args:
        rt: (4, 4) homogeneous transform whose upper-left 3x3 is invertible.
    Returns:
        (4, 4) inverse transform (the last row is copied from `rt` unchanged).
    """
    # Compute the rotation inverse once (the original computed it twice) and
    # rely on the module-level `import copy` instead of a redundant local import.
    rot_inv = np.linalg.inv(rt[:3, :3])
    rt_inv = copy.deepcopy(rt)
    rt_inv[:3, :3] = rot_inv
    rt_inv[:3, 3:4] = -rot_inv @ rt[:3, 3:4]
    return rt_inv

def to_matrix4x4_2(rotation, translation, inverse=True):
    """Build a 4x4 homogeneous transform from a 3x3 rotation and a translation.

    Args:
        rotation: (3, 3) rotation matrix.
        translation: (3,) translation vector.
        inverse: when True (default) return the inverse rigid transform
            [R^-1 | -R^-1 t] instead of [R | t].
    Returns:
        (4, 4) homogeneous transform.
    """
    output = np.eye(4)
    if inverse:
        # Compute the inverse once (the original inverted the rotation twice
        # and first filled in values that were immediately overwritten).
        rot_inv = np.linalg.inv(rotation)
        output[:3, :3] = rot_inv
        output[:3, 3] = rot_inv @ (-translation)
    else:
        output[:3, :3] = rotation
        output[:3, 3] = translation
    return output


def to_matrix4x4(m):
    """Embed a 3x3 matrix into the upper-left corner of a 4x4 identity."""
    mat = np.eye(4)
    mat[:3, :3] = m
    return mat

def pad_ones(pts):
    """Append a homogeneous 1-column to (n, 3) points, giving (n, 4)."""
    assert pts.shape[1] == 3
    ones_col = np.ones((pts.shape[0], 1))
    return np.hstack((pts, ones_col))

def gen_corners(box):
    """Return the (3, 8) corner coordinates of a 7-dof box.

    box: array-like [x, y, z, l, w, h, yaw]. Corner convention matches
    nuscenes Box.corners(): x points forward, y to the left, z up.
    """
    w, l, h = box[[4, 3, 5]]
    # Axis-aligned half-extent offsets for the 8 corners.
    half_x = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
    half_y = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
    half_z = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
    corners = np.vstack((half_x, half_y, half_z))
    # Rotate about z by the box yaw, then translate to the box center.
    corners = rotation_matrix_z(box[6]) @ corners
    corners = corners + box[[0, 1, 2]].reshape(3, 1)
    return corners


def isRotationMatrix(R):
    """True when R is orthonormal (R^T R close to identity within 1e-6)."""
    identity = np.identity(3, dtype=R.dtype)
    deviation = np.linalg.norm(identity - np.dot(np.transpose(R), R))
    return deviation < 1e-6

# rotationMatrixToEulerAngles converts a rotation matrix to Euler angles
def rotationMatrixToEulerAngles(R):
    """Convert a 3x3 rotation matrix to XYZ Euler angles (radians).

    Returns np.array([x, y, z]); raises AssertionError if R is not a
    valid rotation matrix.
    """
    assert (isRotationMatrix(R))
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy < 1e-6:
        # Gimbal-lock branch: pitch near +/-90 degrees, yaw fixed to 0.
        roll = math.atan2(-R[1, 2], R[1, 1])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = 0
    else:
        roll = math.atan2(R[2, 1], R[2, 2])
        pitch = math.atan2(-R[2, 0], sy)
        yaw = math.atan2(R[1, 0], R[0, 0])
    return np.array([roll, pitch, yaw])

def get_item(idx):
    """Load one nuScenes v1.0-mini sample: lidar cloud, boxes and six cameras.

    Args:
        idx: index into the devkit's sample list.
    Returns:
        dict with keys:
          - 'lidar': {'data': (n, 4) homogeneous xyz1 points in the lidar frame,
                      'tfs': {'lidar_to_ego', 'ego_to_global'} 4x4 transforms}
          - 'boxes': (m, 7) rows [x, y, z, l, w, h, yaw] in the global frame
          - 'boxes_8c': (m, 8, 3) box corners in the global frame
          - 'cameras': list of {'name', 'data' (BGR image),
                       'tfs': {'global_to_ego', 'ego_to_camera', 'camera_intrinsic'}}
    """
    version = "mini"
    dataroot = "/files/data/nuscenes"
    nuscenes = NuScenes(version='v1.0-{}'.format(version), dataroot=dataroot, verbose=False)
    sample = nuscenes.sample[idx]

    # --- lidar points plus lidar->ego / ego->global transforms ---
    lidar_sample_data = nuscenes.get('sample_data', sample['data']["LIDAR_TOP"])
    lidar_file = os.path.join(nuscenes.dataroot, lidar_sample_data["filename"])
    # Raw cloud is (n, 5); keep xyz and pad to homogeneous coordinates.
    lidar_pointcloud = np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 5)
    lidar_pointcloud = np.concatenate([lidar_pointcloud[:, :3], np.ones((len(lidar_pointcloud), 1))], axis=1)
    ego_pose = nuscenes.get('ego_pose', lidar_sample_data['ego_pose_token'])
    ego_to_global = to_matrix4x4_2(Quaternion(ego_pose['rotation']).rotation_matrix,
                                   np.array(ego_pose['translation']), False)
    lidar_sensor = nuscenes.get('calibrated_sensor', lidar_sample_data['calibrated_sensor_token'])
    lidar_to_ego = to_matrix4x4_2(Quaternion(lidar_sensor['rotation']).rotation_matrix,
                                  np.array(lidar_sensor['translation']), False)

    # --- annotation boxes (global frame) ---
    boxes = []
    boxes_8c = []
    for annotation_token in sample['anns']:
        instance = nuscenes.get('sample_annotation', annotation_token)
        box = Box(instance['translation'], instance['size'], Quaternion(instance['rotation']))
        boxes_8c.append(box.corners().T)  # (8, 3) corner coordinates
        # Yaw about z extracted from the full rotation matrix; wlh is reordered
        # to lwh so each row is [x, y, z, l, w, h, yaw].
        yaw = rotationMatrixToEulerAngles(box.rotation_matrix)[2]
        boxes.append(np.concatenate((box.center, box.wlh[[1, 0, 2]], np.array([yaw])), axis=0))

    OneData = {
        'lidar': {
            'data': lidar_pointcloud,
            'tfs': {
                'lidar_to_ego': lidar_to_ego,
                'ego_to_global': ego_to_global,
            },
        },
        'boxes': np.array(boxes),
        'boxes_8c': np.array(boxes_8c),
        'cameras': []
    }

    # --- per-camera image, extrinsics and intrinsics ---
    cameras = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    for camera in cameras:
        camera_sample_data = nuscenes.get('sample_data', sample['data'][camera])
        image_file = os.path.join(nuscenes.dataroot, camera_sample_data["filename"])
        ego_pose = nuscenes.get('ego_pose', camera_sample_data['ego_pose_token'])
        # inverse=True (the default) yields global->ego and ego->camera, so the
        # projection chain is camera_intrinsic @ ego_to_camera @ global_to_ego.
        global_to_ego = to_matrix4x4_2(Quaternion(ego_pose['rotation']).rotation_matrix,
                                       np.array(ego_pose['translation']))

        camera_sensor = nuscenes.get('calibrated_sensor', camera_sample_data['calibrated_sensor_token'])
        camera_intrinsic = to_matrix4x4(camera_sensor['camera_intrinsic'])
        ego_to_camera = to_matrix4x4_2(Quaternion(camera_sensor['rotation']).rotation_matrix,
                                       np.array(camera_sensor['translation']))

        image = cv2.imread(image_file)

        OneData['cameras'].append({
            'name': camera,
            'data': image,
            'tfs': {
                'global_to_ego': global_to_ego,
                'ego_to_camera': ego_to_camera,
                'camera_intrinsic': camera_intrinsic,
            }
        })
    return OneData

def tranform_pt(points, tfs: list):
    '''
    Transform homogeneous points from frame a to frame b.
    Args:
        points: shape = (n,4)
        tfs: [a2x,x2y,...,z2b]
    Returns: transformed (n,4) points
    '''
    assert points.shape[1] == 4
    # Fold the chain into a single 4x4 (later transforms multiply from the left).
    combined = np.eye(4)
    for tf in tfs:
        combined = tf @ combined
    return points @ combined.T

def tranform_pt_pad(points, tfs: list):
    """Transform the xyz columns of `points` through `tfs` in place; returns `points`."""
    homogeneous = pad_ones(points[:, :3])
    transformed = tranform_pt(homogeneous, tfs)
    points[:, :3] = transformed[:, :3]
    return points

def transform_3Dpts(points, tfs: list):
    """Transform the first three columns of `points` through `tfs` in place.

    Extra columns (intensity, rgb, ...) are left untouched; returns `points`.
    """
    if points.shape[1] < 3:
        raise ValueError('Points must have at least 3 dimensions')
    homogeneous = pad_ones(points[:, :3])
    points[:, :3] = tranform_pt(homogeneous, tfs)[:, :3]
    return points

def tranform_pt_2_pixel(points, tfs: list):
    """Project (n,4) homogeneous points through `tfs`, then perspective-divide x,y by depth z."""
    assert points.shape[1] == 4
    projected = tranform_pt(points, tfs)
    projected[:, :2] /= projected[:, 2:3]
    return projected

def rotation_matrix_z(theta):
    """Return the 3x3 rotation matrix for an angle theta (radians) about the Z axis."""
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.array([[cos_t, -sin_t, 0],
                     [sin_t, cos_t, 0],
                     [0, 0, 1]])

def draw_pts2img(points, image, tfs: list, pts_size=1):
    """Project lidar points through `tfs` and draw depth-colored dots on a copy of `image`."""
    canvas = copy.deepcopy(image)
    pixels = tranform_pt_2_pixel(points[:, :4], tfs)
    # Keep only points in front of the camera (positive depth).
    visible = pixels[pixels[:, 2] > 0][:, :3].astype(np.int32)
    for u, v, depth in visible:
        # Color varies with depth so nearer/farther points are distinguishable.
        color = (int(20 * depth), int(10 * depth // 5), int(255.0 / 1600 * depth * depth))
        cv2.circle(canvas, (u, v), pts_size, color, -1, 16)
    return canvas

def draw_boxes_8c2img(boxes_8c, image, tfs: list):
    """Project (n, 8, 3) box corners through `tfs` and draw green wireframes on a copy of `image`.

    Corner/edge layout:
        0 ------ 1
      / |     /  |
    4 ------ 5   |
    |   3 ---|-- 2
    |  /     | /
    7 ------ 6
    Edge lists cover the 4 verticals (0-4,1-5,2-6,3-7), the top face
    (0-1,1-2,2-3,3-0) and the bottom face (4-5,5-6,6-7,7-4).
    """
    canvas = copy.deepcopy(image)
    assert boxes_8c.shape[1] == 8 and boxes_8c.shape[2] == 3
    edge_start = [0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7]
    edge_end = [4, 5, 6, 7, 1, 2, 3, 0, 5, 6, 7, 4]
    for box_corners in boxes_8c:
        homogeneous = np.concatenate((box_corners, np.ones(shape=(box_corners.shape[0], 1))), axis=1)
        pix = tranform_pt_2_pixel(homogeneous, tfs).astype(int)
        for p0, p1 in zip(pix[edge_start], pix[edge_end]):
            # Skip any edge with an endpoint behind the camera.
            if p0[2] <= 0 or p1[2] <= 0:
                continue
            cv2.line(canvas, (p0[0], p0[1]), (p1[0], p1[1]), (0, 255, 0), 2, 16)
    return canvas

def draw_boxes2img(boxes, image, tfs: list, attrs=None):
    """Project (n, >=7) boxes [x, y, z, l, w, h, yaw] through `tfs` and draw
    wireframes on a copy of `image`.

    Args:
        boxes: (n, >=7) array of box parameters.
        image: BGR image to draw on (not modified; a deep copy is returned).
        tfs: transform chain passed to tranform_pt_2_pixel.
        attrs: optional per-box integer labels used to pick a color from
            color_mapping (modulo its length); all boxes are green when omitted.

    Corner/edge layout (verticals, top face, bottom face):
        0 ------ 1
      / |     /  |
    4 ------ 5   |
    |   3 ---|-- 2
    |  /     | /
    7 ------ 6
    """
    image_new = copy.deepcopy(image)
    assert boxes.shape[1] >= 7
    ix, iy = [0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7], [4, 5, 6, 7, 1, 2, 3, 0, 5, 6, 7, 4]
    for idx, box in enumerate(boxes):
        corners_res = pad_ones(gen_corners(box).T)
        corners = tranform_pt_2_pixel(corners_res, tfs).astype(int)
        # BUG FIX: the original used `attrs != None`, which performs an
        # elementwise comparison (and fails in a boolean context) when attrs
        # is a numpy array; identity comparison is the correct check.
        if attrs is not None:
            color = tuple((color_mapping[attrs[idx] % len(color_mapping)]).tolist())
        else:
            color = (0, 255, 0)
        for p0, p1 in zip(corners[ix], corners[iy]):
            # Skip edges with an endpoint behind the camera.
            if p0[2] <= 0 or p1[2] <= 0: continue
            cv2.line(image_new, (p0[0], p0[1]), (p1[0], p1[1]), color, 2, 16)
    return image_new

def draw_img2pts(image, points_rgb, tfs: list):
    """Sample image colors at the projected point locations into points_rgb[:, 3:6].

    Modifies `points_rgb` in place; points projecting outside the image or
    behind the camera keep their existing color columns. Returns None.
    """
    assert points_rgb.shape[1] == 6
    homogeneous = np.concatenate((points_rgb[:, :3], np.ones(shape=(len(points_rgb), 1))), axis=1)
    pixels = tranform_pt_2_pixel(homogeneous, tfs)
    in_bounds = ((pixels[:, 0] >= 0) & (pixels[:, 0] < image.shape[1])
                 & (pixels[:, 1] >= 0) & (pixels[:, 1] < image.shape[0])
                 & (pixels[:, 2] >= 0))
    px = pixels[in_bounds].astype(np.int32)
    points_rgb[in_bounds, 3:6] = image[px[:, 1], px[:, 0], :3]

def read_label_cyw(file_path):
    '''
    Read a cyw-format label file and return the annotations as a dict.
    '''
    with open(file_path, 'r') as f:
        raw_lines = f.readlines()
    tokens = [raw.strip().split(' ') for raw in raw_lines]
    annotation = {
        # Class names, lower-cased.
        'name': np.array([t[0].lower() for t in tokens]),
        # Lidar-frame (x, y, z): bottom-face center coordinates.
        'location': np.array([t[1:4] for t in tokens], dtype=np.float32),
        # 3D size (dimensions).
        'dimensions': np.array([t[4:7] for t in tokens], dtype=np.float32),
        # Rotation (radians) about the camera axis (rotation_y).
        'rotation_y': np.array([t[7:8] for t in tokens], dtype=np.float32),
    }
    return annotation

def read_label_kitti(file_path):
    """Read a KITTI label file and return the annotations as a dict.

    Fields follow the KITTI object label format (15 whitespace-separated
    tokens per line). Dimensions are reordered hwl -> lwh, and the y
    coordinate is shifted from the bottom-face center to the box center.
    """
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip().split(' ') for line in lines]
    annotation = {}
    annotation['name'] = np.array([line[0:1] for line in lines])
    # BUG FIX: np.float / np.int were deprecated in NumPy 1.20 and removed in
    # 1.24 (AttributeError); the builtin float / int give identical dtypes.
    annotation['truncated'] = np.array([line[1:2] for line in lines], dtype=float)  # truncation
    annotation['occluded'] = np.array([line[2:3] for line in lines], dtype=int)  # occlusion level
    annotation['alpha'] = np.array([line[3:4] for line in lines], dtype=float)  # observation angle
    annotation['bbox'] = np.array([line[4:8] for line in lines], dtype=float)  # 2D image bbox: left-top, right-bottom
    annotation['dimensions'] = np.array([line[8:11] for line in lines], dtype=float)[:,
                               [2, 1, 0]]  # 3D size: hwl -> lwh
    annotation['location'] = np.array([line[11:14] for line in lines],
                                      dtype=float)  # camera-frame (x, y, z): bottom-face center
    # KITTI's camera frame has y pointing down and location at the bottom face,
    # so subtract half the height to get the geometric center.
    annotation['location'][:,1] = annotation['location'][:,1] - 0.5 * annotation['dimensions'][:,2]
    annotation['rotation_y'] = np.array([line[14:15] for line in lines], dtype=float)  # rotation (radians) about the camera y axis
    return annotation

def read_label_dr(file_path):
    '''
    Read a DR-format label file and return an annotation dict with boxes
    converted from the camera frame to the lidar frame.
    '''
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip().split(' ') for line in lines]
    annotation = {}
    annotation['name'] = np.array([line[0].lower() for line in lines])
    annotation['location'] = np.array([line[11:14] for line in lines],
                                      dtype=np.float32)  # camera-frame (x, y, z): bottom-face center coordinates
    annotation['dimensions'] = np.array([line[10:7:-1] for line in lines],
                                        dtype=np.float32)  # 3D size: reversed slice turns stored h,w,l into l,w,h
    # annotation['dimensions'] = annotation['dimensions'][:, [1, 0, 2]]  # 3d size (dimensions) hwl -> camera coordinates (lhw)
    annotation['rotation_y'] = - np.array([line[14:15] for line in lines],
                                          dtype=np.float32)  # rotation (radians); sign flipped for the target frame
    annotation['rotation_y'] = annotation['rotation_y'] - np.pi / 2  # correct rotation by an extra -90 degrees
    #
    # Box transform: camera frame ---> lidar frame.
    # NOTE(review): tr_velo2cam is a hard-coded lidar->camera extrinsic for one
    # specific sensor rig — confirm it matches the data this reader is used on.
    tr_velo2cam = np.array([[0.9994015951319486, -0.03199927972993585, 0.013133839670170688, 0.2710357611083472],
                            [0.013650110615193858, 0.015969147292999922, -0.9997793060545548, -0.11226274034223951],
                            [0.03178248146242525, 0.9993603116151348, 0.01639638498544132, -0.02040423686369943],
                            [0, 0, 0, 1]])
    # Extra +90-degree rotation about z applied after the cam->velo transform.
    trans_z = np.array([[0.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0, 0, 0, 1]])
    # Invert the rigid transform analytically: [R^-1 | -R^-1 t].
    tr_cam2velo = tr_velo2cam.copy()
    tr_cam2velo[:3, :3] = np.linalg.inv(tr_velo2cam[:3, :3])
    tr_cam2velo[:3, -1] = np.dot(-tr_cam2velo[:3, :3], tr_velo2cam[:3, -1])

    # Transform the 3D box centers into the lidar frame.
    # box_center = box[:, :3]
    box_center1 = np.concatenate((annotation['location'], np.ones((len(annotation['location']), 1))), axis=1)
    annotation['location'] = (trans_z @ tr_cam2velo @ box_center1.T).T[:, :3]
    # annotation['location'] = (tr_cam2velo @ box_center1.T).T[:, :3]
    annotation['location'][:, 2] += 0.5 * annotation['dimensions'][:, 2]  # height fix: label z is the bottom center, lidar boxes use the geometric center

    return annotation

def just_look(img, title: str = '0'):
    """Display `img` in a resizable OpenCV window named `title`; blocks until a key press."""
    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.imshow(title, img)
    cv2.waitKey()

def kpos_self_response(img_pos_embed, img_id, pix_x, pix_y=0):
    """Cosine similarity of one pixel's embedding against all embeddings, rescaled to [0, 1].

    img_pos_embed is indexed as [img_id, pix_y, pix_x, :], so the last axis is
    the embedding dimension.
    """
    import torch.nn.functional as F
    query = img_pos_embed[img_id, pix_y, pix_x, :]
    similarity = F.cosine_similarity(query, img_pos_embed, dim=-1)
    return similarity * 0.5 + 0.5
def width_respone(input, img_id, pix_x, fixed_range=True):
    """Visualize the self-response of one width position's embedding and return it."""
    embeds = input.detach().cpu()[0].unsqueeze(1)
    sims = kpos_self_response(embeds, img_id, pix_x, 0)
    view_6sims(sims, fixed_range=fixed_range)
    return sims

def pic_respone(input, img_id, pix_x, pix_y, fixed_range=True):
    """Visualize the self-response of one pixel's embedding across images and return it."""
    embeds = input.detach().cpu()[0]
    sims = kpos_self_response(embeds, img_id, pix_x, pix_y)
    view_6sims(sims, fixed_range=fixed_range)
    return sims

def view_6sims(images, titles=['0','1','2','3','4','5'], fixed_range=True):
    """Render up to six similarity maps in a 2x3 grid of viridis heatmaps (blocks on show)."""
    fig, axes = plt.subplots(2, 3, figsize=(24, 9))  # 2 rows x 3 columns
    for axis, heatmap, title in zip(axes.flat, images, titles):
        # Fixed [0, 1] range keeps panels comparable; otherwise scale per map.
        if fixed_range:
            vmin, vmax = 0, 1
        else:
            vmin, vmax = heatmap.min(), heatmap.max()
        axis.imshow(heatmap, cmap='viridis', vmin=vmin, vmax=vmax)
        axis.set_title(title)
        axis.axis('off')
    plt.tight_layout()
    plt.show()

def view_6imgs(images, titles=['0','1','2','3','4','5'], fig_title='Global Title',height=720,width=1280):
    """Show up to six images in a 2x3 grid with a shared figure title (blocks on show).

    A None entry in `titles` renders a black placeholder without consuming an
    image. (H, W, 3) images are resized with skimage and shown BGR->RGB;
    2-D (grayscale) images are resized with cv2.
    """
    fig, axs = plt.subplots(2, 3, figsize=(24, 9))  # 2 rows x 3 columns
    fig.suptitle(fig_title, fontsize=16)
    idx = -1
    for ax, title in zip(axs.flat, titles):
        if title is None:
            img = np.zeros_like(images[0])  # black placeholder panel
        else:
            idx += 1
            if idx >= len(images):
                continue
            img = images[idx]
            from skimage.transform import resize
            if len(img.shape) == 3:
                img = resize(img, (height, width, 3), mode='constant', anti_aliasing=True)
            elif len(img.shape) == 2:
                # BUG FIX: the resized grayscale image was assigned to an
                # unused variable (img2) and silently discarded, so 2-D images
                # were displayed at their original size.
                img = cv2.resize(img, (width, height), interpolation=cv2.INTER_LINEAR)
        if len(img.shape) == 3 and img.shape[2] == 3:
            ax.imshow(img[:, :, ::-1])  # BGR (OpenCV) -> RGB for matplotlib
        else:
            ax.imshow(img)
        ax.set_title(title)
        ax.axis('off')
    plt.tight_layout()
    plt.show()

def view3d(pts, boxes=None, mode='xyzrgb'):
    """Open an open3d viewer showing the points, optional boxes and an origin axis frame."""
    from open3d_vis import Visualizer
    import open3d as o3d
    vis = Visualizer(pts, boxes, points_size=5, mode=mode)
    # Coordinate frame at the origin so orientation is visible in the viewer.
    axis_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
    vis.o3d_visualizer.add_geometry(axis_frame)
    vis.show()

if __name__ == '__main__':
    # Load one nuScenes-mini sample (lidar + 6 cameras + boxes).
    # OneData = get_item(58)
    OneData = get_item(255)
    # OneData = get_item(70)

    # Crop the cloud to a box around the ego vehicle and drop points within a
    # 3 m radius of the sensor (x^2 + y^2 > 9).
    points_rbg = copy.deepcopy(OneData['lidar']['data'][:,:3])
    points_rbg = points_rbg[(points_rbg[:,0]>-30.0)&
                            (points_rbg[:,0]<50.0) &
                            (points_rbg[:,1]>-30.0)&
                            (points_rbg[:,1]<30.0)&
                            (points_rbg[:,2]>-4.0)&
                            (points_rbg[:,2]<4.0) &
                            ((points_rbg[:,0]*points_rbg[:,0]+points_rbg[:,1]*points_rbg[:,1])>9)]
    # Append three zero columns that draw_img2pts fills with sampled RGB values.
    points_rbg = np.concatenate((points_rbg, np.zeros_like(points_rbg)),axis=1)
    image_with_ptss=[]
    titles=[]
    for camera_dict in OneData['cameras']:
        # Build the full chain: lidar->ego->global (lidar tfs) followed by
        # global->ego->camera->pixel (camera tfs); dict order is relied on here.
        tfs = []
        for tf in OneData['lidar']['tfs'].values():
            tfs.append(tf)
        for tf in camera_dict['tfs'].values():
            tfs.append(tf)
        image_new = camera_dict['data']
        image_new = draw_pts2img(OneData['lidar']['data'],camera_dict['data'],tfs)# lidar -> ego -> global -> ego -> camera -> pixel
        image_new = draw_boxes2img(OneData['boxes'],image_new,tfs[2:]) # global -> ego -> camera -> pixel (boxes are already in the global frame)
        # image_new = draw_boxes_8c2img(OneData['boxes_8c'],image_new,tfs[2:]) # global -> ego -> camera -> pixel
        draw_img2pts(camera_dict['data'],points_rbg,tfs)# lidar -> ego -> global -> ego -> camera -> pixel
        image_with_ptss.append(image_new)
        titles.append(camera_dict['name'])

    # just_look(image_with_ptss[0], titles[0])
    view_6imgs(image_with_ptss, titles)

    # Move the colorized points into the global frame for the 3D viewer.
    points_rbg_global = np.ones((len(points_rbg),4))
    points_rbg_global[:,:3] = points_rbg[:,:3]
    tfs = []
    for tf in OneData['lidar']['tfs'].values():
        tfs.append(tf)
    points_rbg_global = tranform_pt(points_rbg_global, tfs)
    points_rbg[:,:3] = points_rbg_global[:,:3]
    nus_boxes = copy.deepcopy(OneData['boxes'])
    # NOTE(review): z is shifted to the bottom face and the yaw is negated —
    # presumably to match the open3d Visualizer's box convention; confirm.
    nus_boxes[:,2] = nus_boxes[:,2]-0.5*nus_boxes[:,5]
    nus_boxes[:,6] = -nus_boxes[:,6]

    # boxes_pc = OneData['boxes_8c'].reshape(-1,3)
    # view3d(boxes_pc,nus_boxes, mode='xyz')
    view3d(points_rbg,nus_boxes)
    pass
