#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import PointCloud2, PointField
import numpy as np
import struct
import open3d as o3d
import time

# --- Helper functions: PointCloud2 (de)serialization ---
def pointcloud2_to_numpy(msg: PointCloud2) -> np.ndarray:
    """Convert a PointCloud2 message into an (N, 4) float32 array.

    Columns are [x, y, z, intensity].  The intensity column is taken from an
    'intensity' field if present, else from a 'reflectivity' field, else 0.0.

    Args:
        msg: input sensor_msgs/PointCloud2.

    Returns:
        np.ndarray of shape (N, 4), dtype float32.

    Raises:
        ValueError: if the message lacks any of the x, y, z fields.
        KeyError: if a selected field uses an unknown PointField datatype
            (same failure mode the caller already catches).
    """
    num_points = msg.width * msg.height
    # PointField datatype constant -> numpy type code (byte order added below).
    np_types = {PointField.INT8: 'i1', PointField.UINT8: 'u1',
                PointField.INT16: 'i2', PointField.UINT16: 'u2',
                PointField.INT32: 'i4', PointField.UINT32: 'u4',
                PointField.FLOAT32: 'f4', PointField.FLOAT64: 'f8'}
    # Honor the message's declared endianness (the old per-point struct loop
    # silently assumed native byte order).
    byte_order = '>' if msg.is_bigendian else '<'
    wanted = {}  # field name -> (byte offset, numpy format string)
    for field in msg.fields:
        if field.name in ('x', 'y', 'z', 'intensity', 'reflectivity'):
            wanted[field.name] = (field.offset, byte_order + np_types[field.datatype])
    if not all(k in wanted for k in ('x', 'y', 'z')):
        raise ValueError("PointCloud2消息缺少x, y, 或 z字段")

    # Vectorized parse: view the raw buffer through a structured dtype whose
    # itemsize equals point_step, instead of struct.unpack-ing every point in
    # a Python loop (orders of magnitude faster on large clouds).
    names = list(wanted)
    record_dtype = np.dtype({'names': names,
                             'formats': [wanted[n][1] for n in names],
                             'offsets': [wanted[n][0] for n in names],
                             'itemsize': msg.point_step})
    records = np.frombuffer(bytes(msg.data), dtype=record_dtype, count=num_points)

    points = np.zeros((num_points, 4), dtype=np.float32)
    points[:, 0] = records['x']
    points[:, 1] = records['y']
    points[:, 2] = records['z']
    # 'intensity' takes precedence over 'reflectivity', matching the original logic.
    if 'intensity' in wanted:
        points[:, 3] = records['intensity']
    elif 'reflectivity' in wanted:
        points[:, 3] = records['reflectivity']
    return points

# Layout of the published cloud: four consecutive little-endian float32 values
# per point (x, y, z, intensity), hence a fixed 16-byte point step.
FIELDS_OUT = [PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1), PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1), PointField(name='intensity', offset=12, datatype=PointField.FLOAT32, count=1)]
POINT_STEP_OUT = 16  # bytes per point: 4 fields x 4 bytes

def create_pointcloud2_msg(header, points_array):
    """Build an unorganized (height=1) PointCloud2 from an (N, >=4) array.

    Only the first four components of each point (x, y, z, intensity) are
    serialized, using the FIELDS_OUT layout (4 x float32, 16-byte step).

    Args:
        header: std_msgs Header copied verbatim onto the output message.
        points_array: (N, >=4) array-like of point rows; may be empty.

    Returns:
        A populated sensor_msgs/PointCloud2 message.
    """
    pcl_msg = PointCloud2()
    pcl_msg.header = header
    pcl_msg.height = 1
    pcl_msg.width = len(points_array)
    pcl_msg.fields = FIELDS_OUT
    pcl_msg.is_bigendian = False
    pcl_msg.point_step = POINT_STEP_OUT
    pcl_msg.row_step = POINT_STEP_OUT * pcl_msg.width
    pcl_msg.is_dense = False
    # Vectorized serialization: a single tobytes() call replaces the per-point
    # struct.pack_into loop.  '<f4' pins little-endian to match is_bigendian
    # above (the old native-endian 'ffff' pack was wrong on big-endian hosts).
    pts = np.asarray(points_array, dtype=np.float32)
    if pts.size:
        pcl_msg.data = np.ascontiguousarray(pts[:, :4], dtype='<f4').tobytes()
    else:
        pcl_msg.data = bytes()  # empty cloud: no payload
    return pcl_msg

class LidarProcessor(Node):
    """Crane-LiDAR segmentation node.

    Splits each incoming PointCloud2 into ground / rope / hook / background,
    publishes the cloud on /livox/pointcloud2_processed and renders it in an
    Open3D window.  Axis convention (as used throughout the code below, where
    horizontal distance is sqrt(y^2 + z^2) and x is logged as "depth from the
    LiDAR"): x = depth along the boresight, y/z = the horizontal plane.
    """

    def __init__(self):
        super().__init__('lidar_processor')
        # --- ROS I/O and Open3D visualization setup ---
        self.declare_parameter('input_cloud', '/livox/lidar')
        input_topic = self.get_parameter('input_cloud').get_parameter_value().string_value
        self.sub = self.create_subscription(PointCloud2, input_topic, self.callback, 10)
        self.pcl_pub = self.create_publisher(PointCloud2, '/livox/pointcloud2_processed', 10)
        self.get_logger().info(f"订阅: {input_topic}，发布可视化点云到: /livox/pointcloud2_processed")
        self.pcd = o3d.geometry.PointCloud()
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window()
        opt = self.vis.get_render_option()
        opt.point_size = 3.0
        opt.background_color = np.array([0, 0, 0])
        # Open3D requires add_geometry() once before update_geometry() works.
        self.is_first_frame = True


        # --- Tunable parameters ---

        # State variables -- maintained automatically, no manual tuning needed.
        self.frame_count = 0                 # frame counter, used in log output
        self.estimated_ground_x = None       # smoothed estimate of the ground depth; updated each time a ground plane is found

        # 1. Ground segmentation parameters (goal: stable, accurate detection of the green ground)
        self.enable_ground_removal = True        # master switch: set False to skip ground detection entirely
        self.ground_search_radius = 8          # ground search radius (m): look for ground points within this horizontal radius centered below the LiDAR
        self.ground_ransac_distance = 0.2        # RANSAC ground tolerance (m): max point-to-plane distance; raise for bumpy terrain, lower for flat ground
        self.ground_min_points_ratio = 0.05      # minimum ground-point ratio: a plane must cover at least this fraction (5%) of the searched points to count as ground, rejecting small spurious planes
        self.ground_level_smoothing_factor = 0.1 # ground-depth smoothing factor (0.0-1.0): EMA weight of the new measurement; smaller = steadier but slower to follow terrain changes (0.05-0.1 recommended)

        # 2. Coarse-ROI parameters (goal: a virtual vertical cylinder that wraps the rope and hook)
        self.coarse_search_radius = 1.0          # coarse search radius (m): horizontal radius of the rope-search cylinder; slightly larger than the hook's maximum swing
        self.coarse_search_min_x = 0.5           # coarse minimum depth (m): where the cylinder starts, so the trolley carrying the LiDAR is not detected
        self.coarse_search_max_x = 50.0          # coarse maximum depth (m): where the cylinder ends; set to the crane's maximum working height

        # 3. Rope-plane detection parameters (goal: accurate detection of the blue rope plane)
        self.rope_plane_ransac_distance = 0.1    # RANSAC rope-plane tolerance (m): raise if swing deformation makes detection unstable
        self.rope_min_points = 30                # minimum rope points: fewest inliers accepted as a valid rope plane (rejects noise)

        # 4. [Core] Hook refinement (fine-tuning) parameters (goal: precisely split the red hook off the rope end)
        # A second, small clustering pass centered on the coarse hook end captures the whole hook while excluding nearby noise.
        self.hook_refinement_radius = 0.6        # refinement radius (m): spherical region around the coarse hook end used for the final clustering; slightly larger than the real hook
        self.hook_refinement_cluster_eps = 0.2   # refinement DBSCAN `eps` (m): raise if the red hook fragments, lower if it swallows noise
        self.hook_refinement_min_points = 15     # minimum hook points: smallest cluster accepted as the final hook



    def callback(self, msg: PointCloud2):
        """Per-frame pipeline: parse -> ground removal -> coarse ROI ->
        rope-plane RANSAC -> hook refinement -> colorize, publish, render."""
        start_time = time.time()

        try: points_array = pointcloud2_to_numpy(msg)
        except (ValueError, KeyError) as e: self.get_logger().error(f"解析PointCloud2消息失败: {e}"); return
        if points_array.shape[0] == 0: self.get_logger().warn("接收到空点云，跳过处理。"); return
        num_original_points = len(points_array)

        # Label array (0: background, 1: ground, 2: hook, 3: rope, 4: other inside ROI)
        point_labels = np.zeros(num_original_points, dtype=int)

        # --- Step 1: global ground segmentation ---
        non_ground_mask = np.ones(num_original_points, dtype=bool)
        if self.enable_ground_removal:
            # Restrict the RANSAC to a horizontal disc under the LiDAR so distant structures cannot win.
            dist_yz_all = np.sqrt(points_array[:, 1]**2 + points_array[:, 2]**2); ground_search_mask = (dist_yz_all <= self.ground_search_radius); points_for_ground_search = points_array[ground_search_mask]
            if len(points_for_ground_search) > 100:
                pcd_ground_search = o3d.geometry.PointCloud(); pcd_ground_search.points = o3d.utility.Vector3dVector(points_for_ground_search[:, :3]); plane_model, inliers = pcd_ground_search.segment_plane(distance_threshold=self.ground_ransac_distance, ransac_n=3, num_iterations=100)
                # A ground plane must be near-horizontal: its normal points along x (depth).
                normal_vector = plane_model[:3]; is_horizontal_plane = np.abs(normal_vector[0]) > 0.9
                if len(inliers) > len(points_for_ground_search) * self.ground_min_points_ratio and is_horizontal_plane:
                    original_indices_of_ground_search = np.where(ground_search_mask)[0]; ground_indices = original_indices_of_ground_search[inliers]; point_labels[ground_indices] = 1; non_ground_mask[ground_indices] = False
                    current_ground_x = np.median(points_array[ground_indices, 0])
                    # Exponential smoothing keeps the ground-depth estimate stable across frames.
                    if self.estimated_ground_x is None: self.estimated_ground_x = current_ground_x
                    else: self.estimated_ground_x = self.ground_level_smoothing_factor * current_ground_x + (1 - self.ground_level_smoothing_factor) * self.estimated_ground_x
                    self.get_logger().info(f"检测到地面，稳定估计深度: {self.estimated_ground_x:.2f} m")

        # Keep all non-ground points together with their indices into the full cloud.
        non_ground_points = points_array[non_ground_mask]
        original_indices_of_non_ground = np.where(non_ground_mask)[0]

        # --- Step 2: coarse cylindrical ROI among non-ground points (used to find the rope) ---
        dist_yz_non_ground = np.sqrt(non_ground_points[:, 1]**2 + non_ground_points[:, 2]**2)
        coarse_roi_mask_local = (dist_yz_non_ground <= self.coarse_search_radius) & (non_ground_points[:, 0] >= self.coarse_search_min_x) & (non_ground_points[:, 0] <= self.coarse_search_max_x)
        points_in_coarse_roi = non_ground_points[coarse_roi_mask_local]

        # Pre-label ROI points as "other objects"; rope and hook labels overwrite them below.
        coarse_roi_indices_global = original_indices_of_non_ground[coarse_roi_mask_local]
        point_labels[coarse_roi_indices_global] = 4

        # --- Step 3: coarse pass - RANSAC rope-plane detection ---
        if len(points_in_coarse_roi) > self.rope_min_points:
            pcd_coarse_roi = o3d.geometry.PointCloud(); pcd_coarse_roi.points = o3d.utility.Vector3dVector(points_in_coarse_roi[:, :3])
            plane_model, inliers = pcd_coarse_roi.segment_plane(distance_threshold=self.rope_plane_ransac_distance, ransac_n=3, num_iterations=100)
            # The rope hangs vertically, so its plane normal must be nearly perpendicular to x.
            normal_vector = plane_model[:3]; is_vertical_plane = np.abs(normal_vector[0]) < 0.1

            if len(inliers) > self.rope_min_points and is_vertical_plane:
                # Extract the rope-plane points and pre-label them as rope.
                rope_plane_indices_in_coarse_roi = inliers
                rope_plane_indices_local = np.where(coarse_roi_mask_local)[0][rope_plane_indices_in_coarse_roi]
                rope_plane_indices_global = original_indices_of_non_ground[rope_plane_indices_local]
                point_labels[rope_plane_indices_global] = 3 # pre-label as rope
                rope_plane_points = non_ground_points[rope_plane_indices_local]

                # --- [Core] Step 4: fine pass - precise clustering at the rope end ---
                # 1. Estimate a coarse hook center from the deepest 0.3 m of the rope plane.
                max_x_rope = np.max(rope_plane_points[:, 0])
                coarse_hook_points = rope_plane_points[rope_plane_points[:, 0] >= (max_x_rope - 0.3)]
                if len(coarse_hook_points) > 0:
                    coarse_hook_center = np.mean(coarse_hook_points[:, :3], axis=0)

                    # 2. Define a spherical refinement ROI and filter it from ALL non-ground points.
                    distances_to_center = np.linalg.norm(non_ground_points[:, :3] - coarse_hook_center, axis=1)
                    refinement_mask_local = (distances_to_center <= self.hook_refinement_radius)
                    points_for_refinement = non_ground_points[refinement_mask_local]

                    # 3. Final DBSCAN clustering inside the refinement region.
                    if len(points_for_refinement) > self.hook_refinement_min_points:
                        pcd_refinement = o3d.geometry.PointCloud(); pcd_refinement.points = o3d.utility.Vector3dVector(points_for_refinement[:, :3])
                        cluster_labels = np.array(pcd_refinement.cluster_dbscan(eps=self.hook_refinement_cluster_eps, min_points=self.hook_refinement_min_points, print_progress=False))

                        if cluster_labels.max() > -1:
                            # The largest cluster (by point count) is taken as the final hook.
                            unique_labels, counts = np.unique(cluster_labels[cluster_labels > -1], return_counts=True)
                            final_hook_label = unique_labels[counts.argmax()]

                            # Extract the final hook points and overwrite their labels.
                            final_hook_mask_in_refinement = (cluster_labels == final_hook_label)

                            original_indices_of_refinement = np.where(refinement_mask_local)[0]
                            final_hook_indices_local = original_indices_of_refinement[final_hook_mask_in_refinement]

                            final_hook_indices_global = original_indices_of_non_ground[final_hook_indices_local]
                            point_labels[final_hook_indices_global] = 2 # final label: hook

                            # Report the hook's depth, and its height above ground when a ground estimate exists.
                            final_hook_depth = np.max(points_array[final_hook_indices_global, 0])
                            self.get_logger().info(f"--- 吊钩已锁定! 深度(距雷达): {final_hook_depth:.2f} 米 ---")
                            if self.estimated_ground_x is not None:
                                height_from_ground = self.estimated_ground_x - final_hook_depth
                                self.get_logger().info(f"--- 吊钩高度(距地面): {height_from_ground:.2f} 米 ---")

        # --- Step 5: colorize and visualize ---
        final_colors = np.zeros((num_original_points, 3))
        # background: dark grey, ground: green, ROI-other: light grey, rope: blue, hook: red
        final_colors[point_labels == 0] = [0.1, 0.1, 0.1]; final_colors[point_labels == 1] = [0.0, 0.5, 0.0]; final_colors[point_labels == 4] = [0.7, 0.7, 0.7]; final_colors[point_labels == 3] = [0.0, 0.5, 1.0]; final_colors[point_labels == 2] = [1.0, 0.0, 0.0]

        # Publish the (uncolored) cloud and refresh the Open3D window with the colored copy.
        pcl_msg = create_pointcloud2_msg(msg.header, points_array); self.pcl_pub.publish(pcl_msg)
        self.pcd.points = o3d.utility.Vector3dVector(points_array[:, :3]); self.pcd.colors = o3d.utility.Vector3dVector(final_colors)
        if self.is_first_frame:
            if len(self.pcd.points) > 0: self.vis.add_geometry(self.pcd); self.is_first_frame = False
        else: self.vis.update_geometry(self.pcd)
        self.vis.poll_events(); self.vis.update_renderer()
        process_time = (time.time() - start_time) * 1000
        self.get_logger().info(f"帧 {self.frame_count+1}: 总点数{num_original_points} → 耗时{process_time:.2f}ms\n")
        self.frame_count += 1

    def destroy_node(self):
        """Close the Open3D window before tearing down the ROS node."""
        self.get_logger().info("正在关闭节点..."); self.vis.destroy_window(); super().destroy_node(); self.get_logger().info("节点已关闭。")

def main(args=None):
    """Initialize rclpy, spin a LidarProcessor node, and shut down cleanly."""
    rclpy.init(args=args)
    node = LidarProcessor()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        node.get_logger().info("捕获到Ctrl+C，开始关闭...")
    finally:
        node.destroy_node()
        rclpy.shutdown()

# Script entry point: run directly (python3 / ros2 run) rather than on import.
if __name__ == '__main__':
    main()