#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
点云图像融合Web适配版本
基于原始fusion.py的核心算法，移除PyQt依赖，适配Web前端
保持所有核心融合算法逻辑不变
"""

import cv2  # OpenCV库，用于图像处理
import numpy as np  # 数值计算库


def read_bin(bin_path, intensity=False):
    """
    Load a KITTI-format .bin point cloud file.

    :param bin_path:   path to the point cloud file
    :param intensity:  when True, keep the reflectance (intensity) column
    :return:           numpy.ndarray `N x 3` or `N x 4`
    """
    # Each record in the file is four float32 values: x, y, z, reflectance.
    points = np.fromfile(bin_path, dtype=np.float32).reshape((-1, 4))
    # Drop the reflectance column unless the caller asked for it.
    return points if intensity else points[:, :3]


def read_calib(calib_path):
    """
    Parse a KITTI calibration file.

    The downloaded color images come from the left color camera, so P2 is
    the intrinsic matrix to use:
        extrinsic = np.matmul(R0, lidar2camera)
        intrinsic = P2
    Each P matrix encodes the x-offset of camera i relative to camera 0
    (the reference camera).  Points transformed by the extrinsic land in
    the rectified frame of camera 0; applying P2 then projects them onto
    the left color image.  P0, P1, P2, P3 correspond to the left gray,
    right gray, left color and right color cameras respectively.

    :param calib_path: path to the calibration text file
    :return: (P0, P1, P2, P3, R0, lidar2camera_m, imu2lidar_m)
    """
    with open(calib_path, 'r') as f:
        lines = f.readlines()

    def values(idx):
        # Drop the leading "KEY:" token on line idx, convert the rest to floats.
        return np.array([float(v) for v in lines[idx].split()[1:]])

    # Projection matrices for the four cameras (lines 0-3).
    P0, P1, P2, P3 = (values(i).reshape((3, 4)) for i in range(4))

    # Rectification rotation (line 4), padded to a 4x4 homogeneous matrix.
    R0 = np.eye(4)
    R0[:3, :3] = values(4).reshape((3, 3))

    # Lidar -> camera transform (line 5), padded to 4x4.
    lidar2camera_m = np.vstack((values(5).reshape((3, 4)), [0, 0, 0, 1]))

    # IMU -> lidar transform (line 6), padded to 4x4.
    imu2lidar_m = np.vstack((values(6).reshape((3, 4)), [0, 0, 0, 1]))

    return P0, P1, P2, P3, R0, lidar2camera_m, imu2lidar_m


def lidar2camera(point_in_lidar, extrinsic):
    """
    Project lidar-frame points into the camera frame.

    :param point_in_lidar: numpy.ndarray `N x 3`
    :param extrinsic: numpy.ndarray `4 x 4`
    :return: point_in_camera numpy.ndarray `N x 3`
    """
    n = point_in_lidar.shape[0]
    # Homogeneous coordinates: append a column of ones, work with 4 x N.
    homogeneous = np.hstack((point_in_lidar, np.ones((n, 1)))).T
    # Apply the rigid transform, drop the homogeneous row, back to N x 3.
    return (np.matmul(extrinsic, homogeneous))[:3].T


def camera2image(point_in_camera, intrinsic):
    """
    Project camera-frame points onto the image plane.

    :param point_in_camera: numpy.ndarray `N x 3`
    :param intrinsic: numpy.ndarray `3 x 3` or `3 x 4`
    :return: point_in_image numpy.ndarray `N x 3` (u, v, z)
    """
    pts = point_in_camera.T           # 3 x N
    depth = pts[-1]                   # camera-frame z, preserved in the output

    # Promote a 3x3 intrinsic to 3x4 by appending a zero translation column.
    if intrinsic.shape == (3, 3):
        intrinsic = np.hstack((intrinsic, np.zeros((3, 1))))

    # Homogeneous coordinates, then perspective division by depth.
    homogeneous = np.vstack((pts, np.ones((1, pts.shape[1]))))
    projected = np.matmul(intrinsic, homogeneous) / depth
    # Replace the normalized last row with the raw depth so callers get (u, v, z).
    projected[-1] = depth
    return projected.T


def lidar2image(point_in_lidar, extrinsic, intrinsic):
    """
    Project lidar-frame points all the way to the image plane, yielding (u, v, z).

    :param point_in_lidar: numpy.ndarray `N x 3`
    :param extrinsic: numpy.ndarray `4 x 4`
    :param intrinsic: numpy.ndarray `3 x 3` or `3 x 4`
    :return: point_in_image numpy.ndarray `N x 3` (u, v, z)
    """
    # Chain the two projections: lidar -> camera -> image.
    return camera2image(lidar2camera(point_in_lidar, extrinsic), intrinsic)


def get_fov_mask(point_in_lidar, extrinsic, intrinsic, h, w):
    """
    Select the points inside the camera field of view, i.e. those that
    project onto the image.

    :param point_in_lidar:   lidar cloud numpy.ndarray `N x 3`
    :param extrinsic:        numpy.ndarray `4 x 4`
    :param intrinsic:        numpy.ndarray `3 x 3` or `3 x 4`
    :param h:                image height, int
    :param w:                image width, int
    :return: (point_in_image[mask], mask) where the first element is the
             (u, v, z) rows that survived filtering and mask is a boolean
             array of length N
    """
    point_in_image = lidar2image(point_in_lidar, extrinsic, intrinsic)
    # Snap pixel coordinates to the integer grid before bounds checking.
    point_in_image[:, :2] = np.round(point_in_image[:, :2])
    u = point_in_image[:, 0]
    v = point_in_image[:, 1]
    z = point_in_image[:, 2]
    # Keep points in front of the camera whose pixel lies inside the image.
    mask = (z > 0) & (u >= 0) & (u < w) & (v >= 0) & (v < h)
    return point_in_image[mask], mask


def perform_fusion_web(pointcloud_path, image_path, calib_path):
    """
    Run the point-cloud / image fusion pipeline and package the result for
    a Web frontend (no PyQt dependency; core algorithm unchanged).

    :param pointcloud_path: path to the KITTI .bin point cloud file
    :param image_path: path to the color image file
    :param calib_path: path to the KITTI calibration file
    :return: dict with raw and in-FOV point data, per-point colors, image /
             projection / calibration metadata, and flattened buffers ready
             for Three.js rendering
    :raises Exception: any failure is re-raised with a descriptive message
                       (the original exception is attached as the cause)
    """
    def _bounds(points):
        # Axis-aligned bounding box helper: (min, max, center), zeros when empty.
        if len(points) == 0:
            zero = np.array([0, 0, 0])
            return zero, zero, zero
        lo = np.min(points, axis=0)
        hi = np.max(points, axis=0)
        return lo, hi, (lo + hi) / 2

    try:
        # Load the full 4-column cloud (x, y, z, intensity); the geometric
        # projection itself only needs the xyz columns.
        point_in_lidar_with_intensity = read_bin(pointcloud_path, intensity=True)
        point_in_lidar = point_in_lidar_with_intensity[:, :3]

        # cv2.imread silently returns None for a missing/unreadable file;
        # fail fast with a clear message instead of a cryptic cvtColor crash.
        bgr_image = cv2.imread(image_path)
        if bgr_image is None:
            raise FileNotFoundError(f"Cannot read image file: {image_path}")
        color_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

        # Calibration: fold the rectification rotation R0 into the intrinsic
        # (P2 = left color camera) and use lidar->camera as the extrinsic,
        # exactly as in the original fusion code.
        _, _, P2, _, R0, lidar2camera_matrix, _ = read_calib(calib_path)
        intrinsic = np.matmul(P2, R0)
        extrinsic = lidar2camera_matrix
        h, w = color_image.shape[:2]

        # Keep only the points whose projection lands inside the image.
        point_in_image, mask = get_fov_mask(point_in_lidar, extrinsic, intrinsic, h, w)
        valid_points = point_in_lidar[mask]                                # xyz inside the FOV
        valid_points_with_intensity = point_in_lidar_with_intensity[mask]  # xyz + intensity

        # Sample the image color under each projected point.
        # Image rows are indexed by v (column 1), image columns by u (column 0).
        colors = color_image[
            point_in_image[:, 1].astype(np.int32),
            point_in_image[:, 0].astype(np.int32)
        ]  # N x 3

        # Projection statistics.
        total_points = len(point_in_lidar)
        valid_points_count = len(valid_points)
        projection_rate = (valid_points_count / total_points) * 100 if total_points > 0 else 0

        # Bounding boxes, computed once per point set instead of re-running
        # min/max for every output field.
        orig_min, orig_max, orig_center = _bounds(point_in_lidar)
        min_coords, max_coords, center_coords = _bounds(valid_points)

        # Camera placement hint for the Three.js viewer: pull back along +z
        # by twice the largest FOV-cloud extent.
        extent = float(np.max(max_coords - min_coords) * 2)

        result = {
            # Raw point cloud (xyz + intensity)
            'original_points': {
                'count': total_points,
                'data': point_in_lidar_with_intensity.tolist(),
                'bounds': {
                    'min': orig_min.tolist(),
                    'max': orig_max.tolist(),
                    'center': orig_center.tolist()
                }
            },

            # Points that fall inside the camera field of view
            'valid_points': {
                'count': valid_points_count,
                'data': valid_points.tolist(),
                'data_with_intensity': valid_points_with_intensity.tolist(),
                'bounds': {
                    'min': min_coords.tolist(),
                    'max': max_coords.tolist(),
                    'center': center_coords.tolist()
                }
            },

            # Per-valid-point RGB sampled from the image
            'colors': colors.tolist(),

            # Source image metadata
            'image_info': {
                'width': w,
                'height': h,
                'channels': color_image.shape[2] if len(color_image.shape) > 2 else 1
            },

            # Projection statistics
            'projection_info': {
                'total_points': total_points,
                'valid_points': valid_points_count,
                'projection_rate': projection_rate,
                'fov_mask_ratio': valid_points_count / total_points if total_points > 0 else 0
            },

            # Calibration matrices actually used for the projection
            'calibration_info': {
                'intrinsic_matrix': intrinsic.tolist(),
                'extrinsic_matrix': extrinsic.tolist(),
                'image_dimensions': [h, w]
            },

            # Flattened buffers in the layout Three.js expects
            'threejs_data': {
                # Full cloud, intensity-shaded
                'original_positions': point_in_lidar.flatten().tolist(),
                'original_colors': generate_intensity_colors(point_in_lidar_with_intensity).flatten().tolist(),

                # FOV subset, intensity-shaded
                'fov_original_positions': valid_points.flatten().tolist(),
                'fov_original_colors': generate_intensity_colors(valid_points_with_intensity).flatten().tolist(),

                # FOV subset colored from the image (normalized to 0-1)
                'colored_positions': valid_points.flatten().tolist(),
                'colored_colors': (colors / 255.0).flatten().tolist(),

                # Rendering parameters
                'point_count': {
                    'original': total_points,
                    'colored': valid_points_count
                },
                'recommended_point_size': calculate_recommended_point_size(valid_points),
                'camera_suggestion': {
                    'position': (center_coords + np.array([0, 0, extent])).tolist(),
                    'target': center_coords.tolist(),
                    'distance': extent
                }
            }
        }

        return result

    except Exception as e:
        # Chain the original exception so the full traceback survives
        # for server-side debugging.
        raise Exception(f"融合处理过程中出错: {str(e)}") from e


def generate_intensity_colors(points):
    """
    Build grayscale colors for a raw point cloud, shaded by intensity
    (the 4th column) when present, otherwise by height (z).

    :param points: numpy.ndarray `N x 3` or `N x 4`
    :return: numpy.ndarray `N x 3` with values in [0, 1]
    """
    # Pick the shading channel: reflectance if available, else height.
    channel = points[:, 3] if points.shape[1] >= 4 else points[:, 2]

    # Empty cloud: nothing to shade.
    if len(channel) == 0:
        return np.zeros((0, 3))

    # Min-max normalize to [0, 1]; a constant channel maps to mid-gray.
    lo = np.min(channel)
    hi = np.max(channel)
    if hi > lo:
        shade = (channel - lo) / (hi - lo)
    else:
        shade = np.full(channel.shape, 0.5)

    # Replicate the shade into R, G and B to get a grayscale ramp.
    return np.repeat(shade.astype(np.float64)[:, None], 3, axis=1)


def calculate_recommended_point_size(points):
    """
    Pick a rendering point size from the cloud size: the denser the cloud,
    the smaller each point should be drawn.

    :param points: point cloud (anything with a length)
    :return: recommended point size, float
    """
    count = len(points)
    if count == 0:
        return 1.0
    # Threshold table: first bucket the count fits in wins.
    for limit, size in ((1000, 2.0), (10000, 1.5), (50000, 1.0)):
        if count < limit:
            return size
    # Very dense cloud: draw the smallest points.
    return 0.5


# Smoke test when run as a script
if __name__ == '__main__':
    # Sample KITTI files used to exercise the fusion pipeline.
    image_path = './data_example/3d_detection/image_2/000003.png'
    bin_path = './data_example/3d_detection/velodyne/000003.bin'
    calib_path = './data_example/3d_detection/calib/000003.txt'

    try:
        result = perform_fusion_web(bin_path, image_path, calib_path)
    except Exception as e:
        print(f"❌ 测试失败: {e}")
    else:
        print("✅ Web融合测试成功!")
        print(f"原始点云数量: {result['original_points']['count']}")
        print(f"有效点云数量: {result['valid_points']['count']}")
        print(f"投影成功率: {result['projection_info']['projection_rate']:.2f}%")
