import rosbag
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from cv_bridge import CvBridge
import cv2
import numpy as np
import os
from tf.transformations import quaternion_matrix
import bisect
import open3d as o3d
from tqdm import tqdm

# mode: "rosbag" or "separate_file"
mode = "rosbag"
bag_file = "/home/zxw/dataset/semantic/self_collect/change_test/realsense_orb_slam_for_multi_2025-05-30-17-28-25.bag"  # replace with your own .bag file path
file_dir = "/mnt/data/3d-lidar/semantic/self_collect/kfs" # replace with your own data directory

rgb_topic = "/camera/color/image_raw"
depth_topic = "/camera/aligned_depth_to_color/image_raw"
pose_topic = "/orb_pose"

# valid depth range for point generation (metres)
max_range = 3.0
min_range = 0.1

# output directory for extracted files / point clouds
output_dir = "/home/zxw/dataset/semantic/self_collect/change_test/realsense_orb_slam_for_multi_2025-05-30-17-28-25"
os.makedirs(output_dir, exist_ok=True)

# initialize cv_bridge (converts ROS image messages to OpenCV arrays)
bridge = CvBridge()

max_samples = 1000  # maximum number of sample groups to save
skip_step = 2  # sampling stride: keep one group of data every skip_step items

# timestamp matching tolerance (seconds)
slop = 0.01

# camera intrinsic matrix (fx, fy, cx, cy for the RealSense color stream)
K = np.array([[384.281, 0.0, 329.505],
                    [0.0, 383.770, 242.564],
                    [0.0, 0.0, 1.0]])

# extrinsic transform body -> camera (identity here)
T_body_camera = np.array([[1.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0]])

# which stream drives synchronization: "rgb", "depth" or "pose"
BASE_LIST_NAME = "rgb"

# only fuse the point-cloud map (skip per-frame file export)
only_combine = False

def get_time(msg):
    """Return the message's header timestamp as seconds (float)."""
    stamp = msg.header.stamp
    return stamp.to_sec()


def synchronize_messages(pose_list, rgb_list, depth_list, slop, base_data_list):
    """Match pose/rgb/depth messages against a base stream by timestamp.

    base_data_list is one of pose_list / rgb_list / depth_list and acts as
    the reference: for every base message the temporally closest message of
    each stream is looked up, and the group is kept only when all three lie
    within `slop` seconds of the base timestamp.

    :param pose_list: pose messages, sorted by header timestamp
    :param rgb_list: RGB image messages, sorted by header timestamp
    :param depth_list: depth image messages, sorted by header timestamp
    :param slop: maximum allowed time difference in seconds
    :param base_data_list: the stream used as the matching reference
    :return: list of (rgb_msg, depth_msg, pose_msg, base_time) tuples
    """
    def _stamp(msg):
        # Header timestamp in seconds (same as the module-level get_time();
        # inlined so this function is self-contained).
        return msg.header.stamp.to_sec()

    # Hoist timestamp extraction out of the per-base loop: the original
    # rebuilt each timestamp list for every base message (O(n^2)).
    pose_times = [_stamp(m) for m in pose_list]
    rgb_times = [_stamp(m) for m in rgb_list]
    depth_times = [_stamp(m) for m in depth_list]

    def _nearest(times, msgs, t):
        # bisect_left gives the insertion point; the closest message can be
        # at idx or idx-1. The original only looked at idx and therefore
        # missed messages slightly *earlier* than the base time.
        if not msgs:
            return None
        idx = bisect.bisect_left(times, t)
        candidates = []
        if idx < len(msgs):
            candidates.append(idx)
        if idx > 0:
            candidates.append(idx - 1)
        best = min(candidates, key=lambda i: abs(times[i] - t))
        return msgs[best] if abs(times[best] - t) <= slop else None

    matches = []
    for base in base_data_list:
        base_time = _stamp(base)

        pose = _nearest(pose_times, pose_list, base_time)
        if pose is None:
            continue
        rgb_candidate = _nearest(rgb_times, rgb_list, base_time)
        if rgb_candidate is None:
            continue
        depth_candidate = _nearest(depth_times, depth_list, base_time)
        if depth_candidate is None:
            continue

        matches.append((rgb_candidate, depth_candidate, pose, base_time))

    return matches

def process_matches(matches):
    """Save synchronized (rgb, depth, pose, timestamp) tuples to output_dir.

    For each match writes <id>_color.png, <id>_depth.tiff and <id>_pose.txt
    (the 4x4 pose composed with T_body_camera), and appends a row of
    (ImageID, TimeStamp-in-nanoseconds) to timestamps.csv. Stops after
    max_samples items.

    :param matches: iterable of (rgb_image, depth_32f, 4x4 pose, timestamp)
    """
    data_counter = 0

    timestamp_path = os.path.join(output_dir, "timestamps.csv")
    # `with` guarantees the CSV is flushed and closed even on error
    # (the original leaked the file handle).
    with open(timestamp_path, "w") as ts_file:
        ts_file.write("ImageID,TimeStamp\n")

        for rgb_image, depth_32f, pose, ts in tqdm(matches, desc="save file"):
            if data_counter >= max_samples:
                break
            try:
                id_str = f"{data_counter:06d}"
                # RGB image
                rgb_path = os.path.join(output_dir, id_str + "_color.png")
                cv2.imwrite(rgb_path, rgb_image)

                # Depth image (float32 metres, hence TIFF rather than PNG)
                depth_path = os.path.join(output_dir, id_str + "_depth.tiff")
                cv2.imwrite(depth_path, depth_32f)

                # Pose as a plain-text 4x4 matrix (composed with the
                # body->camera extrinsic)
                pose_txt_path = os.path.join(output_dir, id_str + "_pose.txt")
                with open(pose_txt_path, 'w') as f:
                    pose_world_camera = pose @ T_body_camera
                    for row in pose_world_camera:
                        f.write(" ".join(map(str, row)) + "\n")

                # Timestamp in nanoseconds, fixed width of 19 characters
                ts_ns = int(ts * 1e9)
                ts_file.write(id_str + "," + f"{ts_ns:19d}" + "\n")

                data_counter += 1

            except Exception as e:
                # Best-effort: report and continue with the next sample.
                print(f"Error processing messages: {e}")


def depth_to_point_cloud(depth_image, rgb_image, K, depth_scale=1.0, max_range=2.0, min_range=0.1):
    """Back-project a depth image into a colored 3-D point cloud.

    :param depth_image: HxW depth map
    :param rgb_image: HxWx3 BGR image aligned with the depth map
    :param K: 3x3 camera intrinsic matrix
    :param depth_scale: factor converting raw depth values to metres
    :param max_range: points at or beyond this depth are discarded
    :param min_range: points at or below this depth are discarded
    :return: (Nx3 point array, Nx3 RGB color array normalized to [0, 1])
    """
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]

    # Pixel-coordinate grids matching the image layout.
    rows, cols = depth_image.shape
    grid_u, grid_v = np.meshgrid(np.arange(cols), np.arange(rows))
    depth_m = depth_image * depth_scale

    # Keep only pixels whose depth lies strictly inside (min_range, max_range).
    mask = (depth_m > min_range) & (depth_m < max_range)
    z = depth_m[mask]

    # Pinhole back-projection.
    x = (grid_u[mask] - cx) * z / fx
    y = (grid_v[mask] - cy) * z / fy
    points = np.column_stack((x, y, z))

    # Image is BGR (OpenCV); flip the channel order and normalize.
    colors = rgb_image[mask][..., ::-1] / 255.0

    return points, colors

def transform_point_cloud(points, T):
    """Transform a point cloud into a new coordinate frame.

    Equivalent to appending a homogeneous 1 to each point, applying T and
    dropping the last row: p' = R p + t with R = T[:3,:3], t = T[:3,3].

    :param points: Nx3 numpy array
    :param T: 4x4 homogeneous transformation matrix
    :return: Nx3 array of transformed points
    """
    rotation = T[:3, :3]
    translation = T[:3, 3]
    return points @ rotation.T + translation

def combine_depth_data(custum_data, max_range=2.0, min_range=0.1):
    """Fuse all frames into one global colored point cloud and save it.

    Each item of custum_data is (rgb_image, depth_32f, pose, timestamp):
    the depth map (float32 metres) is back-projected through the shared
    intrinsics K, transformed into the world frame with the 4x4 pose,
    accumulated, voxel-downsampled and written to output_dir as a .pcd.

    :param custum_data: iterable of (rgb, depth_32f, 4x4 pose, timestamp)
    :param max_range: discard points farther than this (metres)
    :param min_range: discard points closer than this (metres)
    """
    global K

    frame_counter = 0
    print("camera intrinsic: {}".format(K))
    print("max range: {}, min range: {}".format(max_range, min_range))

    global_point_cloud = []
    global_colors = []
    print("开始拼接点云")
    for rgb, depth_32f, pose, _ in tqdm(custum_data, desc="combine depth data"):
        if frame_counter >= max_samples:
            break

        # Back-project this frame's depth into a local point cloud.
        local_points, colors = depth_to_point_cloud(depth_32f, rgb, K, max_range=max_range, min_range=min_range)
        if len(local_points) < 100:
            # Too few valid depth pixels — skip the frame.
            print("valid depth points less than 100")
            continue

        # Move the points into the world frame using the frame's pose.
        global_points = transform_point_cloud(local_points, pose)

        global_point_cloud.append(global_points)
        global_colors.append(colors)

        frame_counter += 1

    # Guard: np.vstack raises on an empty list (e.g. every frame skipped).
    if not global_point_cloud:
        print("no frames contributed points; nothing to write")
        return

    print("全局点云合并完成, 开始降采样并写入文件")
    # Merge all per-frame clouds into one array.
    final_point_cloud = np.vstack(global_point_cloud)
    final_colors = np.vstack(global_colors)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(final_point_cloud)
    pcd.colors = o3d.utility.Vector3dVector(final_colors)

    # Voxel downsampling: 0.02 m grid keeps the map size manageable.
    voxel_size = 0.02  # metres
    downsampled_pcd = pcd.voxel_down_sample(voxel_size=voxel_size)

    # Save the downsampled cloud; file name records the max range used.
    output_file_name = "pointcloud_map_downsampled_max_range_" + str(max_range) + ".pcd"
    o3d.io.write_point_cloud(os.path.join(output_dir, output_file_name), downsampled_pcd)

def rosmsg_to_custom(matches):
    """Convert matched ROS messages into plain Python data.

    :param matches: iterable of (rgb_msg, depth_msg, pose_msg, timestamp)
    :return: list of (bgr image, float32 depth in metres, 4x4 pose, timestamp)
    """
    converted = []
    for rgb_msg, depth_msg, pose_msg, ts in tqdm(matches, desc="rosmsg to custom"):
        # ROS image messages -> OpenCV arrays.
        rgb_image = bridge.imgmsg_to_cv2(rgb_msg, "bgr8")
        depth_raw = bridge.imgmsg_to_cv2(depth_msg, desired_encoding="16UC1")
        depth_32f = depth_raw.astype(np.float32) * 0.001  # mm -> m

        # Build a 4x4 homogeneous matrix from the odometry pose.
        p = pose_msg.pose.pose
        pose = quaternion_matrix([p.orientation.x, p.orientation.y,
                                  p.orientation.z, p.orientation.w])
        pose[:3, 3] = [p.position.x, p.position.y, p.position.z]

        converted.append((rgb_image, depth_32f, pose, ts))
    return converted


def downsample_point_cloud():
    """Load output_dir/pointcloud_map.pcd, voxel-downsample it and save the
    result as downsampled_pointcloud.pcd in the same directory.

    :raises ValueError: when the point cloud cannot be loaded
    """
    # 1. Read the point cloud file
    pcd_path = os.path.join(output_dir, "pointcloud_map.pcd")
    pcd = o3d.io.read_point_cloud(pcd_path)

    # 2. Verify it loaded. read_point_cloud returns an (always truthy)
    # empty PointCloud on failure, so the original `if not pcd:` check
    # could never fire; test for emptiness explicitly instead.
    if pcd.is_empty():
        raise ValueError("无法加载点云，请检查文件路径是否正确")

    # 3. Downsample with a voxel grid
    voxel_size = 0.02  # metres, tune as needed
    downsampled_pcd = pcd.voxel_down_sample(voxel_size=voxel_size)

    # 4. Save the downsampled cloud
    output_path = os.path.join(output_dir, "downsampled_pointcloud.pcd")
    o3d.io.write_point_cloud(output_path, downsampled_pcd)

def read_data_from_rosbag(bag_file):
    """Read RGB, depth and pose messages from a rosbag, synchronize them
    and convert them to plain Python tuples.

    :param bag_file: path of the .bag file
    :return: list of (rgb_image, depth_32f, 4x4 pose, timestamp); an empty
             list when the bag cannot be read (the original returned None,
             which crashed main() at len(matches))
    """
    # Buffer every message per topic.
    rgb_msgs = []
    depth_msgs = []
    pose_msgs = []

    try:
        with rosbag.Bag(bag_file, 'r') as bag:
            for topic, msg, t in bag.read_messages(topics=[rgb_topic, depth_topic, pose_topic]):
                if topic == rgb_topic:
                    rgb_msgs.append(msg)
                elif topic == depth_topic:
                    depth_msgs.append(msg)
                elif topic == pose_topic:
                    pose_msgs.append(msg)
    except Exception as e:
        print(f"Failed to read bag file: {e}")
        # Keep the return type stable for callers.
        return []

    print(f"Read {len(rgb_msgs)} RGB messages, {len(depth_msgs)} Depth messages, {len(pose_msgs)} Pose messages.")

    # Pick the stream that drives synchronization (falls back to RGB for
    # any unknown BASE_LIST_NAME, matching the original behavior).
    base_msgs = {
        "rgb": rgb_msgs,
        "depth": depth_msgs,
        "pose": pose_msgs,
    }.get(BASE_LIST_NAME, rgb_msgs)

    matches = synchronize_messages(pose_msgs, rgb_msgs, depth_msgs, slop, base_msgs)
    print(f"Matched {len(matches)} messages.")
    return rosmsg_to_custom(matches)

def read_from_filesystem(data_dir):
    """Read rgb, depth and pose data from the filesystem.

    Expects data_dir to contain an "rgb" folder and a "depth" folder of
    .png files named by their float timestamp, plus a "poses.txt" whose
    lines are: timestamp x y z qx qy qz qw.

    :param data_dir: directory containing rgb/, depth/ and poses.txt
    :return: list of (rgb_image, depth_32f, pose_matrix, timestamp)
    """
    rgb_dir = os.path.join(data_dir, "rgb")
    depth_dir = os.path.join(data_dir, "depth")
    pose_path = os.path.join(data_dir, "poses.txt")

    # Load the pose file: one 4x4 homogeneous matrix per timestamp.
    poses_dict = {}
    with open(pose_path, 'r') as f:
        for line in f:
            if not line.strip():
                continue
            parts = line.strip().split()
            timestamp = float(parts[0])
            x, y, z = map(float, parts[1:4])
            qx, qy, qz, qw = map(float, parts[4:8])
            T = quaternion_matrix([qx, qy, qz, qw])
            T[:3, 3] = [x, y, z]
            poses_dict[timestamp] = T

    # Collect (timestamp, filename) pairs; the filename stem is the
    # timestamp. (The original reused `f` as the loop variable, shadowing
    # the file handle above — renamed for clarity.)
    def get_timestamps(folder):
        timestamps = []
        for fname in os.listdir(folder):
            if fname.endswith(".png"):
                try:
                    timestamps.append((float(fname[:-4]), fname))
                except ValueError:
                    continue  # skip files whose stem is not a float
        return sorted(timestamps, key=lambda x: x[0])

    rgb_timestamps = get_timestamps(rgb_dir)
    depth_timestamps = get_timestamps(depth_dir)

    result = []

    print("开始匹配 RGB、Depth 和 Pose 数据...")
    for ts, rgb_file in tqdm(rgb_timestamps, desc="Matching RGB to Depth/Pose"):
        # Nearest depth frame: check insertion point and its predecessor.
        idx = bisect.bisect_left([t for t, _ in depth_timestamps], ts)
        candidates = []
        if idx < len(depth_timestamps):
            candidates.append(depth_timestamps[idx])
        if idx > 0:
            candidates.append(depth_timestamps[idx - 1])
        if not candidates:
            continue
        best_depth = min(candidates, key=lambda x: abs(x[0] - ts))
        depth_ts, depth_file = best_depth

        # NOTE(review): pose lookup requires an exact float-key match between
        # the image filename and poses.txt — fragile if the two sources write
        # timestamps with different precision; confirm both share a writer.
        if ts not in poses_dict:
            continue

        # Load the images.
        rgb_img = cv2.imread(os.path.join(rgb_dir, rgb_file))
        depth_img = cv2.imread(os.path.join(depth_dir, depth_file), cv2.IMREAD_UNCHANGED)

        # cv2.imread returns None on failure; the original checked only the
        # depth image and would crash later on an unreadable RGB file.
        if rgb_img is None or depth_img is None:
            continue

        # Depth is stored in millimetres; convert to float32 metres.
        depth_32f = depth_img.astype(np.float32) * 0.001

        pose = poses_dict[ts]

        result.append((rgb_img, depth_32f, pose, ts))

    print(f"成功加载 {len(result)} 组数据")
    return result

def sample_input_data(matches, skip_step = 1):
    """Keep every skip_step-th match, counting from 1.

    With skip_step == 1 everything is kept; with skip_step == 2 the items
    at 1-based positions 2, 4, 6, ... are kept, and so on.

    :param matches: sequence of match tuples
    :param skip_step: sampling stride
    :return: subsampled list of matches
    """
    return [item for pos, item in enumerate(matches, start=1) if pos % skip_step == 0]

def main():
    """Entry point: load synchronized RGB-D/pose data, subsample it, then
    either fuse a global point-cloud map or dump per-frame files depending
    on the `only_combine` flag."""
    # Validate the mode up front (guard clause).
    if mode not in ("rosbag", "separate_file"):
        raise ValueError("Invalid mode. Please choose 'rosbag' or 'separate_file'.")

    if mode == "rosbag":
        print("从 .bag 文件中读取数据")
        matches = read_data_from_rosbag(bag_file)
    else:
        print("从文件夹中读取数据")
        matches = read_from_filesystem(file_dir)

    print(f"Found {len(matches)} synchronized message groups.")

    # Thin the data by the configured stride.
    matches = sample_input_data(matches, skip_step)
    print(f"Using {len(matches)} samples. skip_step = {skip_step}")

    # Either build the fused map or export per-frame files.
    if only_combine:
        combine_depth_data(matches, max_range=max_range, min_range=min_range)
    else:
        process_matches(matches)

    print("Finished processing bag file.")


if __name__ == '__main__':
    main()