import cv2
import numpy as np
import os
from typing import List, Tuple


def load_images(file_path: str) -> Tuple[List[str], List[float]]:
    """Load image file names and timestamps from an index file.

    Each non-empty line is whitespace-separated; column 0 holds the
    timestamp and column 3 holds the image file name.

    Args:
        file_path: Path to the index text file.

    Returns:
        A pair ``(image_filenames, timestamps)`` of parallel lists.
    """
    image_filenames: List[str] = []
    timestamps: List[float] = []
    with open(file_path, 'r') as f:
        for raw_line in f:
            fields = raw_line.split()
            if not fields:  # skip blank lines
                continue
            timestamps.append(float(fields[0]))
            image_filenames.append(fields[3])
    return image_filenames, timestamps

def undistort_image(image: np.ndarray, K: np.ndarray, D: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Undistort a fisheye image.

    Fix: the original return annotation declared a single ``np.ndarray``
    while the function actually returns a tuple.

    Args:
        image: Distorted input image, shape (H, W) or (H, W, C).
        K: 3x3 fisheye intrinsic matrix.
        D: Fisheye distortion coefficients (4 values).

    Returns:
        Tuple of (undistorted image, new intrinsic matrix valid for the
        undistorted image).
    """
    h, w = image.shape[:2]
    # Estimate intrinsics for the rectified view, build the remap tables
    # once, then warp the image.
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(K, D, (w, h), None)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), new_K, (w, h), cv2.CV_16SC2)
    undistorted_img = cv2.remap(image, map1, map2, interpolation=cv2.INTER_LINEAR)
    return undistorted_img, new_K

def depth_to_points(depth_image: np.ndarray, fx: float, fy: float, cx: float, cy: float, max_depth: float = 7.0, min_depth: float = 0.2) -> np.ndarray:
    """Back-project a depth image (millimetres) into a 3-D point cloud.

    Vectorized replacement for the original per-pixel Python double loop
    (same point order: row-major). The previously hard-coded 0.2 m lower
    bound is now the ``min_depth`` parameter (default unchanged).

    Args:
        depth_image: (H, W) depth map in millimetres.
        fx, fy: Focal lengths in pixels.
        cx, cy: Principal point in pixels.
        max_depth: Exclusive upper depth bound in metres.
        min_depth: Exclusive lower depth bound in metres.

    Returns:
        (N, 3) float array of [x, y, z] points in metres; (0, 3) when no
        pixel passes the depth filter.
    """
    h, w = depth_image.shape
    z = depth_image / 1000.0  # millimetres -> metres
    cols, rows = np.meshgrid(np.arange(w), np.arange(h))
    valid = (z > min_depth) & (z < max_depth)
    z_v = z[valid]
    x = (cols[valid] - cx) * z_v / fx
    y = (rows[valid] - cy) * z_v / fy
    return np.stack([x, y, z_v], axis=1)

def show_point_on_rgb(left_img, depth_show_th_mm, points):
    """Mark projected points on an image (modified in place).

    Fixes: the original indexed the image without bounds checking, so a
    point projecting outside the frame raised IndexError (or silently
    wrapped via a negative index); it also computed an unused depth
    weight, removed here as dead code.

    Args:
        left_img: BGR image array (H, W, 3); written in place.
        depth_show_th_mm: Depth display threshold in millimetres (kept
            for interface compatibility; it no longer affects output
            since the weight it fed was never used).
        points: Iterable of (u, v, depth) rows in pixel coordinates.

    Returns:
        The same image with every in-bounds point colored blue (BGR).
    """
    rgb_show = left_img
    h, w = rgb_show.shape[:2]

    for point in points:
        ux, uy, _depth = point
        ux, uy = int(ux), int(uy)
        # Skip points that fall outside the image: large indices would
        # raise and negative ones would wrap to the opposite border.
        if not (0 <= ux < w and 0 <= uy < h):
            continue
        rgb_show[uy, ux] = (255, 0, 0)  # blue (B, G, R)

    return rgb_show


def project_points_to_rgb1(points_3d: np.ndarray, new_K: np.ndarray, D: np.ndarray) -> np.ndarray:
    """Project 3-D points through a pinhole intrinsic matrix.

    Normalizes the points onto the z = 1 plane, applies ``new_K`` to get
    pixel coordinates, attaches the original depth as a third column, and
    keeps only rows whose depth is at least 0.25.

    Note: ``D`` is accepted for interface symmetry but unused here (no
    distortion is applied).

    Args:
        points_3d: (N, 3) array of camera-frame points.
        new_K: 3x3 intrinsic matrix of the undistorted image.
        D: Unused distortion coefficients.

    Returns:
        (M, 3) array of [u, v, depth] rows with depth >= 0.25.
    """
    depths = points_3d[:, 2:3]
    # Normalize onto z = 1 and append the homogeneous coordinate.
    normalized = points_3d[:, :2] / depths
    homo = np.hstack((normalized, np.ones((normalized.shape[0], 1))))
    # Apply intrinsics, then de-homogenize back to pixel (u, v).
    projected = homo @ new_K.T
    pixels = projected[:, :2] / projected[:, 2:3]
    # Bind depth to each projected pixel and filter near points.
    uvd = np.hstack((pixels, depths))
    return uvd[uvd[:, 2] >= 0.25]

def project_points_to_rgb(points: np.ndarray, K: np.ndarray, D: np.ndarray) -> np.ndarray:
    """Project 3-D points into the fisheye image.

    Points are normalized onto the z = 1 plane, pushed through OpenCV's
    fisheye distortion model, and returned with depth attached.

    Args:
        points: (N, 3) array of camera-frame points.
        K: 3x3 fisheye intrinsic matrix.
        D: Fisheye distortion coefficients.

    Returns:
        (N, 3) array of [u, v, depth] rows in fisheye pixel coordinates.
    """
    depths = points[:, 2:3]
    normalized = (points[:, :2] / depths).reshape(-1, 1, 2)
    distorted = cv2.fisheye.distortPoints(normalized, K, D)
    # Bind the original depth to each distorted pixel coordinate.
    return np.hstack((distorted.reshape(-1, 2), depths))

def compute_depth_pseudo_color(depth_image: np.ndarray, max_depth: float = 5.0) -> np.ndarray:
    """Render a millimetre depth image as a JET pseudo-color map.

    Args:
        depth_image: (H, W) depth map in millimetres.
        max_depth: Depth in metres that maps to full color intensity.

    Returns:
        (H, W, 3) BGR pseudo-color image.
    """
    scale = max_depth * 1000  # metres -> millimetres
    normalized = np.clip(depth_image / scale, 0, 1)
    gray = (normalized * 255).astype(np.uint8)
    return cv2.applyColorMap(gray, cv2.COLORMAP_JET)

def save_to_pcd(points, output_file):
    """Save an (N, 3) or (N, 4) point array as an ASCII PCD v0.7 file.

    Fix: input validation now raises ``ValueError`` instead of using
    ``assert``, which is silently stripped under ``python -O``.

    Args:
        points: Array of shape (N, 3) for x/y/z or (N, 4) for
            x/y/z/intensity.
        output_file: Destination file path.

    Raises:
        ValueError: If the second dimension is not 3 or 4.
    """
    num_points = points.shape[0]
    dim = points.shape[1]

    if dim not in (3, 4):
        raise ValueError("点云数据必须是 (N, 3) 或 (N, 4) 的格式！")

    # PCD file header (ASCII, unorganized cloud: HEIGHT 1).
    header = [
        "# .PCD v0.7 - Point Cloud Data file format",
        "VERSION 0.7",
        f"FIELDS {'x y z intensity' if dim == 4 else 'x y z'}",
        f"SIZE {'4 4 4 4' if dim == 4 else '4 4 4'}",
        f"TYPE {'F F F F' if dim == 4 else 'F F F'}",
        f"COUNT {'1 1 1 1' if dim == 4 else '1 1 1'}",
        f"WIDTH {num_points}",
        "HEIGHT 1",
        "VIEWPOINT 0 0 0 1 0 0 0",
        f"POINTS {num_points}",
        "DATA ascii"
    ]

    # Write header followed by one "x y z [i]" row per point.
    with open(output_file, "w") as f:
        f.write("\n".join(header) + "\n")
        np.savetxt(f, points, fmt="%.6f")

    print(f"点云已保存为 PCD 文件：{output_file}")


def main(data_path: str):
    """Run the fisheye/depth projection pipeline on the first frame pair.

    Loads camera intrinsics, reads the first left-IR image and its
    nearest-in-time depth frame, undistorts the image, converts the depth
    map to a point cloud, projects the cloud back into both the fisheye
    and the undistorted images, and writes all results under ``results/``.

    Fixes vs. the original:
    - the ``i``/``continue`` flag that kept iterating uselessly after the
      first frame is replaced with an explicit ``break`` (same behavior);
    - the per-pixel Python loop building the depth visualization is
      vectorized (identical output, orders of magnitude faster);
    - ``results/`` is created up front so ``cv2.imwrite`` cannot silently
      fail on a missing directory;
    - redundant ``np.frombuffer`` over data already read by
      ``np.fromfile`` removed; ``depth_witdh`` typo fixed; dead
      ``max_d``/``min_d`` tracking and no-op range check removed.
    """
    # Left fisheye camera intrinsics (K) and distortion coefficients (D).
    K = np.array([[380.977051, 0, 518.386719],
                  [0, 380.876495, 337.40976],
                  [0, 0, 1]])
    D = np.array([0.045951847, -0.011027799, 0.007337844, -0.00242689601])

    # All outputs go to results/; create it before the first imwrite.
    os.makedirs("results", exist_ok=True)

    # Index files: one "timestamp ... filename" line per frame.
    image_files, image_timestamps = load_images(os.path.join(data_path, "left_ir.txt"))
    depth_files, depth_timestamps = load_images(os.path.join(data_path, "depth.txt"))

    for img_file, img_timestamp in zip(image_files, image_timestamps):
        img_path = os.path.join(data_path, img_file)

        # The raw file holds one I420 (YUV 4:2:0) frame of this size,
        # so the packed buffer is (height * 3 / 2) x width bytes.
        width = 1024
        height = 768

        yuv_array = np.fromfile(img_path, dtype=np.uint8)
        yuv_image = yuv_array.reshape((height * 3 // 2, width))
        bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR_I420)
        cv2.imwrite("results/left-fisheye.jpg", bgr_image)

        image = bgr_image
        if image is None:
            continue

        # Undistort the fisheye image and save the rectified view.
        undistorted_image, new_K = undistort_image(image, K, D)
        cv2.imwrite("results/left-fisheye-undistort.jpg", undistorted_image)

        # Pick the depth frame whose timestamp is closest to the image's.
        closest_depth_idx = int(np.argmin(np.abs(np.asarray(depth_timestamps) - img_timestamp)))
        depth_path = os.path.join(data_path, depth_files[closest_depth_idx])

        # Raw uint16 depth frame, millimetres, 640 x 480.
        depth_data = np.fromfile(depth_path, dtype=np.uint16)
        depth_height = 480
        depth_width = 640
        depth_map = depth_data.reshape((depth_height, depth_width))

        # Grayscale visualization: 5 m maps to 255, truncated toward
        # zero then clamped to [0, 255] (vectorized per-pixel mapping).
        show_depth = np.clip((depth_map / 1000.0 * 255 / 5).astype(np.int64), 0, 255).astype(np.uint8)

        # Save a 2x nearest-neighbor upscale for easier inspection.
        depth_map_resized = cv2.resize(show_depth, (depth_map.shape[1] * 2, depth_map.shape[0] * 2), interpolation=cv2.INTER_NEAREST)
        cv2.imwrite("results/depth_1280x960.jpg", depth_map_resized)

        # Depth-camera pinhole parameters after rotation:
        # [fx, fy, cx, cy, depth offset].
        depthparam_param_after_rotate = [161.301956, 161.301956, 334.934448, 201.649963, -54.1767044]
        print("Depth parameters after rotation:", depthparam_param_after_rotate)
        dfx, dfy, dcx, dcy, ddepth_offset = depthparam_param_after_rotate
        print(f"fx: {dfx}, fy: {dfy}, cx: {dcx}, cy: {dcy}, depth_offset: {ddepth_offset}")

        # Back-project the depth map into a point cloud and save it.
        points = depth_to_points(depth_map, dfx, dfy, dcx, dcy)
        save_to_pcd(points, 'results/depth.pcd')

        # Project the cloud into the fisheye and undistorted images and
        # draw the projected points on each.
        rgb_points = project_points_to_rgb(points, K, D)
        rgb_undistort_points = project_points_to_rgb1(points, new_K, D)
        rgb_show = show_point_on_rgb(bgr_image, 7000, rgb_points)
        rgb_undistort_show = show_point_on_rgb(undistorted_image, 7000, rgb_undistort_points)
        cv2.imwrite("results/rgb_fisheye_show.jpg", rgb_show)
        cv2.imwrite("results/rgb_fisheye_undistort_show.jpg", rgb_undistort_show)

        # Pseudo-color rendering of the raw depth map.
        pseudo_color_depth = compute_depth_pseudo_color(depth_map)
        cv2.imwrite("results/point_2_rgb_pseudo.jpg", pseudo_color_depth)

        # The original processed only the first frame (via an index
        # flag); keep that behavior explicitly.
        break

if __name__ == "__main__":
    # NOTE(review): the first assignment below was dead code (immediately
    # overwritten); kept as a comment for reference.
    # data_path = "/home/JSDC/017254/code/data_2025_03_25_1_board"
    data_path = "/home/xuce/workspace/dataset/0331/20250331070337"  # replace with your data path
    main(data_path)
