'''
nuScenes records are addressed by token and loaded as dictionaries.
Each sample bundles one lidar sweep, 6 cameras, and 5 radars.
1. Fetch the token for the sensor channel.
2. Use the token to fetch the actual sample data.
'''

from nuscenes.nuscenes import NuScenes
import numpy as np
import cv2
import os
from pyquaternion import Quaternion


import torch
import cv_bridge

# Dataset version and root directory for the nuScenes mini split.
version = "v1.0-mini"
dataroot = "/home/lin/code/bevdetv2.1/BEVDet/data/nuscenes"

# Load the nuScenes index (tables of samples, sensor data, poses, ...).
nusc = NuScenes(version, dataroot, verbose=False)

print("sample_number: ", len(nusc.sample), "\n")

# Work on the first annotated sample (keyframe) only.
sample = (nusc.sample[0])

# Fetch the lidar sample_data record via its token.
lidar_token = sample['data']['LIDAR_TOP']
# print(lidar_token)

lidar_sample_info = nusc.get("sample_data", lidar_token)
# print(lidar_sample_info)

# Lidar file path + load the point cloud + point-cloud shape.
# Each point has 5 float32 fields — presumably x, y, z, intensity,
# ring index (the nuScenes .pcd.bin layout); confirm against the devkit.
lidar_filename = os.path.join(dataroot, lidar_sample_info["filename"])
lidar_points = np.fromfile(lidar_filename, dtype = np.float32).reshape(-1, 5)

def get_matrix(q, t):
    """Build a 4x4 homogeneous transform from a quaternion and a translation.

    Args:
        q: rotation quaternion as (w, x, y, z), anything pyquaternion accepts.
        t: translation, length-3 sequence.

    Returns:
        4x4 np.ndarray of the form [[R, t], [0, 1]].
    """
    T = np.eye(4)
    # BUG FIX: Quaternion(q) is the 4-element quaternion itself and cannot
    # fill a 3x3 slice; the rotation block needs its 3x3 rotation matrix.
    T[:3, :3] = Quaternion(q).rotation_matrix
    T[:3, -1] = t
    return T

# ego -> global: vehicle pose at the lidar timestamp.
lidar_ego_pose_token = lidar_sample_info['ego_pose_token']
lidar_ego_pose_data = nusc.get("ego_pose", lidar_ego_pose_token)
rotation = lidar_ego_pose_data['rotation']
translate = lidar_ego_pose_data['translation']
Tglobal_ego = get_matrix(rotation, translate)

# lidar -> ego: static mounting calibration of the lidar sensor.
lidar_calibrated_sensor_token = lidar_sample_info['calibrated_sensor_token']
lidar_calibrated_sensor_data = nusc.get('calibrated_sensor', lidar_calibrated_sensor_token)
rotation = lidar_calibrated_sensor_data['rotation']
translate = lidar_calibrated_sensor_data['translation']
Tego_lidar = get_matrix(rotation, translate)
# print(Tego_lidar)
# Both @ and .dot() express matrix multiplication; @ needs Python 3.5+,
# .dot() works on 2-D arrays.
Tglobal_lidar = Tglobal_ego @ Tego_lidar 

# print(Tglobal_lidar)

print("lidar:")
print("lidar_filename:\n", lidar_filename)
print("lidar_points.shape:\n", lidar_points.shape)
print("Tglobal_lidar:\n", Tglobal_lidar)


# Homogeneous coordinates: keep xyz, append a column of ones -> (N, 4).
hom_points = np.concatenate([lidar_points[:, :3], np.ones((len(lidar_points), 1))], axis=1)
# print("hom_cloud.shape: ", hom_cloud.shape)

# Transform homogeneous lidar points into the global frame.
# Row-vector convention: p_row @ T.T is the transpose of T @ p_col.
global_points = hom_points @ Tglobal_lidar.T   # transpose of the column-vector result
print(global_points.shape)

# [] list: ordered, mutable, allows duplicates; () tuple: ordered, immutable.
cameras = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT','CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT']

# Project the global-frame lidar points into each camera's pixel plane:
# global -> ego(cam timestamp) -> camera -> pixel.
for cam in cameras:
    # Demo restriction: only process the front-left camera.
    if (cam != 'CAM_FRONT_LEFT'):
        continue 
    # Fetch the camera sample_data record via its token.
    camera_token = sample['data'][cam]
    camera_sample_info = nusc.get("sample_data", camera_token)
    camera_filename = os.path.join(dataroot, camera_sample_info['filename'])
    
    # global -> ego at the camera timestamp.
    # BUG FIX: the inverse of a homogeneous [R|t] transform is NOT its
    # transpose — transposing scrambles the translation column. Use a
    # proper matrix inverse of the ego2global pose instead of .T.
    camera_ego_pose_token = camera_sample_info['ego_pose_token']
    camera_ego_pose_data = nusc.get('ego_pose', camera_ego_pose_token)
    
    rotation = camera_ego_pose_data['rotation']
    translate = camera_ego_pose_data['translation']
    Tego_global2 = np.linalg.inv(get_matrix(rotation, translate))

    # ego -> camera: invert the static cam2ego mounting calibration.
    camera_calibrated_sensor_token = camera_sample_info['calibrated_sensor_token']
    camera_calibrated_sensor_data = nusc.get('calibrated_sensor', camera_calibrated_sensor_token)
    
    # Camera intrinsics embedded in a 4x4 so the chain stays homogeneous.
    camera_intrinsic = np.eye(4)
    camera_intrinsic[:3, :3] = camera_calibrated_sensor_data['camera_intrinsic']
    
    rotation  = camera_calibrated_sensor_data['rotation']
    translate = camera_calibrated_sensor_data['translation']
    Tcam_ego  = np.linalg.inv(get_matrix(rotation, translate))
    
    # Full projection: pixel = K * (cam <- ego) * (ego <- global) * p_global.
    Tcam_global =  camera_intrinsic @ Tcam_ego @ Tego_global2
    
    image_points =  global_points @ Tcam_global.T 
    print(image_points.shape)
    # print(lidar_points[:5, :])
    # print(image_points[:5, :])

    # Perspective division: normalize u, v by depth z.
    # BUG FIX: only divide columns 0..1 — the original divided z by itself,
    # turning every depth into 1 and breaking the z > 0 visibility test.
    print(image_points[:5, :])
    image_points[:, :2] /= image_points[ : , [2]]
    print(lidar_points[:10, :])
    
    print(image_points[:10, :])

    # Keep only points in front of the camera (z > 0).
    # for u, v in image_points[image_points[:, 2] > 0, :2].astype(int):
    #     print(u, v)

    # print("\n" + cam + ":")
    # print("camera_filename:\n", camera_filename)
    # print("camera_intrinsic:\n", camera_intrinsic)
    # print("Tglobal_cam:\n", Tglobal_cam)
    