# nuscenes 坐标变换

from nuscenes.nuscenes import NuScenes
import numpy as np
import os
import cv2

from pyquaternion import Quaternion   # 关于四元数的操作  把四元数转换成3*3的转换矩阵
from nuscenes.utils.data_classes import Box    # 获取标注框的角点


# Build the NuScenes dataset handle (v1.0-mini split).
nuscenes_data = "E:\\data\\3DpointCloud\\nuscenes\\nuscenes_mini"
nusc = NuScenes(version="v1.0-mini", dataroot=nuscenes_data, verbose=False)

# Work with the first sample (one synchronized keyframe across all sensors).
my_sample = nusc.sample[0]
# print(my_sample)

# Look up the top-lidar sample_data record of this sample.
lidar_sample_data = nusc.get("sample_data", my_sample["data"]["LIDAR_TOP"])
# print(lidar_sample_data)

# Load the raw sweep: N rows of (x, y, z, intensity, ring index),
# with coordinates expressed in the lidar sensor frame.
lidar_points = np.fromfile(
    os.path.join(nuscenes_data, lidar_sample_data["filename"]),
    dtype=np.float32,
).reshape(-1, 5)
print(lidar_points)

# 坐标系 
# 1. 全局坐标系 global coordinate
#    -  可以认为，车辆在t0时刻的位置认为是全局坐标的原点
# 2. 车体坐标系  ego_pose, ego_coordinate
#    - 以车体为原点的坐标系
# 3. 传感器坐标系
#   - lidar  坐标系
#   - camera 坐标系
#   - radar  坐标系

# 标定 calibration
# lidar的标定，获得的结果是，lidar相对于ego而言的位置 translation，和旋转rotation
#                            - translation 可以用三个float数字表示位置 
#                            - rotation 用四个float数字表示旋转
# camera的标定，获得的结果是， camera相对于ego而言的位置 translation 和旋转 rotation
#                            - 相机内参 camera intrinsic(3d->2d平面)

# 将点云映射到图像上
# lidar_points -> ego_pose -> global -> ego_pose -> camera -> intrinsic -> image

# 1. Lidar calibration: the lidar's pose relative to the ego frame
#    (translation: 3 floats, rotation: quaternion of 4 floats).
lidar_calibrater = nusc.get("calibrated_sensor", lidar_sample_data["calibrated_sensor_token"])
print(lidar_calibrater)
lidar_rotation = Quaternion(lidar_calibrater["rotation"]).rotation_matrix
print(lidar_rotation)
# Turn a pose/calibration record (quaternion + translation) into a 4x4 transform.
def get_matrix(carlibrated_data, inverse=False):
    """Build the 4x4 homogeneous rigid transform for a nuScenes record.

    carlibrated_data: dict holding a "rotation" quaternion (w, x, y, z)
        and a "translation" 3-vector.
    inverse: when True, return the inverse transform instead.
    """
    transform = np.eye(4)
    transform[:3, :3] = Quaternion(carlibrated_data["rotation"]).rotation_matrix
    transform[:3, 3] = carlibrated_data["translation"]
    return np.linalg.inv(transform) if inverse else transform
print("------")
print(get_matrix(lidar_calibrater))

# lidar -> ego:    lidar_pose @ points
# ego   -> global: ego_pose   @ points
lidar_pose = get_matrix(lidar_calibrater)  # lidar pose, expressed in the ego frame
ego_pose = get_matrix(nusc.get("ego_pose", lidar_sample_data["ego_pose_token"]))
print(ego_pose)

# Compose the two rigid transforms: lidar frame -> global frame.
lidar_to_global = ego_pose @ lidar_pose

# lidar_points is N*5 (x, y, z, intensity, ring index); keep (x, y, z)
# and append a 1 to obtain homogeneous coordinates [x, y, z, 1].
hom_points = np.concatenate([lidar_points[:, :3], np.ones((len(lidar_points), 1))], axis=1)

global_points = hom_points @ lidar_to_global.T

# The rig has six cameras; project points and boxes onto each one.
cameras = ["CAM_FRONT","CAM_BACK", "CAM_BACK_LEFT", "CAM_FRONT_LEFT", "CAM_FRONT_RIGHT", "CAM_BACK_RIGHT"]
for cam in cameras:
    camera_token = my_sample['data'][cam]
    # print(camera_token)
    camera_data = nusc.get("sample_data", camera_token)
    image_file = os.path.join(nuscenes_data, camera_data["filename"])
    image = cv2.imread(image_file)

    # Ego pose at this camera's timestamp; each sensor record carries its
    # own ego_pose_token.  Inverted: global -> ego.
    camera_ego_pose = nusc.get("ego_pose", camera_data["ego_pose_token"])
    global_to_ego = get_matrix(camera_ego_pose, True)   # global -> camera's ego pose

    # Camera extrinsics, inverted: ego -> camera frame.
    camera_calibrated = nusc.get("calibrated_sensor", camera_data["calibrated_sensor_token"])
    ego_to_camera = get_matrix(camera_calibrated, True)           # ego_pose -> camera
    # Embed the 3x3 intrinsic matrix into a 4x4 identity so it composes
    # with the homogeneous transforms below.
    camera_intrinsic = np.eye(4)

    camera_intrinsic[:3, :3] = camera_calibrated["camera_intrinsic"]
    # print("------")
    # print(camera_intrinsic)

    # Full chain: global -> ego (at camera time) -> camera -> pixel coords.
    global_to_image = camera_intrinsic @ ego_to_camera @ global_to_ego

    # Draw every annotated 3D box of this sample.
    for ann_token in my_sample["anns"]:
        annotation = nusc.get("sample_annotation", ann_token)
        # Annotation center/size/orientation are given in the global frame.
        box_pose = Box(annotation["translation"], annotation['size'], Quaternion(annotation["rotation"]))  
        corners = box_pose.corners().T  # 8 box corners, one per row
        corners = np.concatenate([corners, np.ones((len(corners), 1))], axis=1)
        image_corners = corners @ global_to_image.T
        # Perspective divide: (x, y) /= depth.  NOTE(review): a corner with
        # z == 0 divides by zero here; behind-camera corners are only
        # filtered out below — confirm this never warns on real data.
        image_corners[:, :2] /= image_corners[:, [2]]
        image_corners = image_corners.astype(np.int32)

        # for x, y in image_corners[image_corners[:, 2]>0, :2].astype(int):
        #     cv2.circle(image, (x, y), 3, (0, 0, 255), -1, 16)
        # The 12 box edges as pairs of corner indices (corners()-order).
        ix, iy = [0,1,2,3,0,1,2,3,4,5,6,7], [4,5,6,7,1,2,3,0,5,6,7,4]
        for p0, p1 in zip(image_corners[ix], image_corners[iy]):
            # Skip edges with an endpoint behind the image plane (z <= 0).
            if p0[2] <= 0 or p1[2] <= 0 :
                continue
            cv2.line(image, (p0[0], p0[1]), (p1[0], p1[1]), (0,255,0), 2, 16)
            pass
         

    # Project the lidar points through the same global -> image chain.
    image_points =  global_points @ global_to_image.T
    image_points[:, :2] /= image_points[:, [2]]

    # Filter z <= 0: points behind the camera have no valid projection.
    for x, y in image_points[image_points[:, 2]>0, :2].astype(int):
        # print(x, y)
        cv2.circle(image, (x, y), 3, (255, 0, 0), -1, 16)
    cv2.imwrite(f"{cam}.jpg", image)
