import sys  # system utilities (argv for the Qt application)
import cv2  # OpenCV, used here for image loading and color conversion
import numpy as np  # numerical array library
import matplotlib.pyplot as plt  # plotting library (provides the colormap)
import pyqtgraph.opengl as gl  # OpenGL-based 3D visualization
from PyQt5.QtWidgets import QApplication  # Qt GUI application framework

# Jet colormap (NOTE(review): appears unused in this file — confirm before removing)
cmap = plt.cm.jet


def read_bin(bin_path, intensity=False):
    """
    Read a point cloud from a KITTI-format .bin file.

    :param bin_path:   path to the point-cloud file
    :param intensity:  whether to keep the reflectance/intensity channel
    :return:           numpy.ndarray of shape `N x 3` (or `N x 4` with intensity)
    """

    # Each record in the binary file stores (x, y, z, intensity) as float32.
    points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4)
    # Drop the intensity column unless the caller asked for it.
    return points if intensity else points[:, :3]


def read_calib(calib_path):
    """
    Read a KITTI calibration file.

    The downloadable color images come from the left color camera, so P2 is
    the projection matrix to use:
        extrinsic = np.matmul(R0, lidar2camera)
        intrinsic = P2
    Each P matrix encodes the x-offset of camera i relative to camera 0
    (the reference camera). Points transformed by `extrinsic` land in the
    rectified frame of camera 0; applying P2 then projects them onto the
    left color image. P0..P3 belong to the left gray, right gray, left
    color and right color cameras respectively.

    :param calib_path: path to the calibration .txt file
    :return: (P0, P1, P2, P3, R0, lidar2camera_m, imu2lidar_m) where the
             P matrices are 3x4 and the remaining three are 4x4
             homogeneous transforms.
    """
    with open(calib_path, 'r') as f:
        raw = f.readlines()

    def _parse(line, shape):
        # A calib line is "<key>: v0 v1 v2 ..."; skip the key token.
        return np.array(list(map(float, line.split()[1:]))).reshape(shape)

    def _to_homogeneous(mat_3x4):
        # Append the [0, 0, 0, 1] row to form a 4x4 rigid transform.
        return np.vstack((mat_3x4, np.array([0.0, 0.0, 0.0, 1.0])))

    # Camera projection matrices P0-P3 (3x4 each).
    P0, P1, P2, P3 = (_parse(raw[i], (3, 4)) for i in range(4))

    # Rectification rotation R0, expanded to a 4x4 homogeneous matrix.
    R0 = _to_homogeneous(np.hstack((_parse(raw[4], (3, 3)), np.zeros((3, 1)))))

    # Lidar -> camera and IMU -> lidar rigid transforms (4x4 each).
    lidar2camera_m = _to_homogeneous(_parse(raw[5], (3, 4)))
    imu2lidar_m = _to_homogeneous(_parse(raw[6], (3, 4)))

    return P0, P1, P2, P3, R0, lidar2camera_m, imu2lidar_m


def vis_pointcloud(points, colors=None):
    """
    Render a lidar point cloud in an interactive OpenGL window.

    :param points:  numpy.ndarray `N x 3`
    :param colors:  numpy.ndarray `N x 3` with values in (0, 255), or None
                    for plain white points
    :return: does not return; exits the process via the Qt event loop
    """
    app = QApplication(sys.argv)
    view = gl.GLViewWidget()
    # One uniform size value per point.
    sizes = np.zeros(points.shape[0], dtype=np.float16) + 0.1

    if colors is None:
        rgba = (1, 1, 1, 1)  # default to opaque white
    else:
        # Normalize RGB to [0, 1] and append an opaque alpha channel.
        rgba = np.hstack((colors / 255, np.ones(shape=(colors.shape[0], 1))))

        # Add a white copy of the cloud, lifted 20 units, for comparison.
        reference_item = gl.GLScatterPlotItem(
            pos=points, size=sizes, color=(1, 1, 1, 1), pxMode=False)
        reference_item.translate(0, 0, 20)
        view.addItem(reference_item)

    # The (possibly colored) main point cloud.
    cloud_item = gl.GLScatterPlotItem(
        pos=points, size=sizes, color=rgba, pxMode=False)
    view.addItem(cloud_item)

    # Show the window and hand control to the Qt event loop.
    view.show()
    sys.exit(app.exec_())


def lidar2camera(point_in_lidar, extrinsic):
    """
    Transform points from the lidar frame into the camera frame.

    :param point_in_lidar: numpy.ndarray `N x 3`
    :param extrinsic:      numpy.ndarray `4 x 4` homogeneous transform
    :return:               numpy.ndarray `N x 3` points in the camera frame
    """
    num_points = point_in_lidar.shape[0]
    # Homogeneous coordinates: append a ones column, transpose to 4 x N.
    homogeneous = np.hstack((point_in_lidar, np.ones(shape=(num_points, 1)))).T
    # Apply the transform, drop the homogeneous row, and return as N x 3.
    return np.matmul(extrinsic, homogeneous)[:3, :].T


def camera2image(point_in_camera, intrinsic):
    """
    Project camera-frame points onto the image plane.

    :param point_in_camera: numpy.ndarray `N x 3`
    :param intrinsic:       numpy.ndarray `3 x 3` or `3 x 4`
    :return:                numpy.ndarray `N x 3` rows of (u, v, z)
    """
    xyz = point_in_camera.T          # 3 x N
    depth = xyz[-1]                  # per-point z, preserved in the output

    # Promote a 3x3 intrinsic to 3x4 by appending a zero translation column.
    if intrinsic.shape == (3, 3):
        intrinsic = np.hstack((intrinsic, np.zeros((3, 1))))

    # Homogeneous coordinates (4 x N), project, then divide by depth.
    homogeneous = np.vstack((xyz, np.ones((1, xyz.shape[1]))))
    projected = np.matmul(intrinsic, homogeneous) / depth
    # Replace the normalized third row with the original depth value.
    projected[-1] = depth
    return projected.T


def lidar2image(point_in_lidar, extrinsic, intrinsic):
    """
    Project lidar points all the way to the image plane, yielding (u, v, z).

    :param point_in_lidar: numpy.ndarray `N x 3`
    :param extrinsic:      numpy.ndarray `4 x 4`
    :param intrinsic:      numpy.ndarray `3 x 3` or `3 x 4`
    :return:               numpy.ndarray `N x 3` rows of (u, v, z)
    """
    # Compose the two stages: lidar -> camera frame -> image plane.
    return camera2image(lidar2camera(point_in_lidar, extrinsic), intrinsic)


def get_fov_mask(point_in_lidar, extrinsic, intrinsic, h, w):
    """
    Select the lidar points inside the camera field of view, i.e. the
    points whose projection lands on the image.

    :param point_in_lidar:  lidar points, numpy.ndarray `N x 3`
    :param extrinsic:       numpy.ndarray `4 x 4`
    :param intrinsic:       numpy.ndarray `3 x 3` or `3 x 4`
    :param h:               image height, int
    :param w:               image width, int
    :return: tuple of
             - projected in-view points (u, v, z), numpy.ndarray `M x 3`
             - boolean mask over the input points, numpy.ndarray `(N,)`
    """
    # Project every lidar point onto the image plane.
    uvz = lidar2image(point_in_lidar, extrinsic, intrinsic)
    # Keep only points in front of the camera (positive depth).
    in_front = uvz[:, -1] > 0
    # Round pixel coordinates so the bounds test matches integer indexing.
    uvz[:, 0] = np.round(uvz[:, 0])
    uvz[:, 1] = np.round(uvz[:, 1])
    # Inside the image rectangle: 0 <= u < w and 0 <= v < h.
    inside = np.logical_and(
        np.logical_and(uvz[:, 0] >= 0, uvz[:, 0] < w),
        np.logical_and(uvz[:, 1] >= 0, uvz[:, 1] < h),
    )
    mask = np.logical_and(in_front, inside)
    return uvz[mask], mask


if __name__ == '__main__':
    # Paths to one KITTI 3D-detection sample.
    image_path = './data_example/3d_detection/image_2/000003.png'
    bin_path = './data_example/3d_detection/velodyne/000003.bin'
    calib_path = './data_example/3d_detection/calib/000003.txt'

    # Load the lidar cloud and the matching image (BGR -> RGB).
    point_in_lidar = read_bin(bin_path)
    color_image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    # Calibration: only P2, R0 and the lidar->camera transform are needed.
    _, _, P2, _, R0, lidar2camera_matrix, _ = read_calib(calib_path)

    # Fold the rectification R0 into the intrinsic; the extrinsic is then
    # just the lidar -> camera transform.
    intrinsic = np.matmul(P2, R0)  # intrinsic
    # extrinsic = np.matmul(R0, lidar2camera_matrix)  # lidar-to-camera extrinsic
    extrinsic = lidar2camera_matrix
    h, w = color_image.shape[:2]  # image height and width

    # Keep only the points that project inside the image.
    point_in_image, mask = get_fov_mask(point_in_lidar, extrinsic, intrinsic, h, w)
    valid_points = point_in_lidar[mask]

    # Sample the image color under each projected point (v indexes rows, u columns).
    colors = color_image[point_in_image[:, 1].astype(np.int32),
                         point_in_image[:, 0].astype(np.int32)]  # N x 3
    colored_point = np.hstack((valid_points, colors))  # N x 6

    # vis_pointcloud(point_in_lidar[~mask])
    # Display the colored point cloud.
    vis_pointcloud(points=valid_points, colors=colors)

    # Optionally persist the points and their colors:
    # np.save('../data_example/points.npy', valid_points)   # (N, 3)  np.float32
    # np.save('../data_example/colors.npy', colors)         # (N, 3)  np.uint8  [0, 255]
