import pyrealsense2 as rs
import numpy as np
from pathlib import Path
from typing import Tuple, Optional
import cv2
import logging
import json
from matplotlib import pyplot as plt
from pyorbbecsdk import *
import os
import time


from camera.utils import frame_to_bgr_image

log = logging.getLogger(__name__)


class CameraBaseTransforamtion:
    """Camera-frame -> robot-base-frame point transform.

    Loads a homogeneous calibration matrix from
    ``<calib_dir>/cali_matrix<suffix>.json`` and applies it to 3D points
    expressed in the camera frame.

    NOTE(review): the class name keeps its original spelling
    ("Transforamtion") because external callers may reference it.
    """

    def __init__(self, calib_dir: str, suffix: str = ""):
        """Load the calibration matrix.

        Args:
            calib_dir: directory containing the calibration JSON file.
            suffix: optional filename suffix, e.g. "" -> cali_matrix.json,
                "_left" -> cali_matrix_left.json.
        """
        calib_file = Path(calib_dir) / f"cali_matrix{suffix}.json"

        with open(calib_file, "r") as f:
            data = json.load(f)
        self.camera_base_matrix = np.array(data)

        # Bug fix: previously logged the (closed) file handle `f` instead of
        # the calibration file path.
        log.info(f"loading CameraBaseTransforamtion from {calib_file}")

    def __call__(self, camera_xyz: np.ndarray) -> np.ndarray:
        """Transform a camera-frame point using the loaded matrix.

        The point is promoted to homogeneous coordinates ``[x, y, z, 1]`` and
        right-multiplied by ``camera_base_matrix`` (row-vector convention).

        Args:
            camera_xyz: 3-element point in the camera frame.

        Returns:
            The transformed homogeneous point as produced by the matrix.
        """
        return np.append(camera_xyz, 1) @ self.camera_base_matrix


class RSCamera:
    """Intel RealSense camera wrapper.

    Starts a color (640x480 BGR8) + depth (320x240 Z16) pipeline, aligns depth
    to the color stream, and provides frame capture, annotation helpers and
    pixel -> 3D deprojection via the stream intrinsics.
    """

    # Timeout in milliseconds passed to wait_for_frames.
    wait_timeout: int = 10000

    def __init__(self, intrinsic_file: str = "data/calibrations/rs_can0.npz"):
        """Load intrinsics from `intrinsic_file` and start the pipeline."""
        # Load camera intrinsics (matrix + distortion coefficients)
        self.camera_matrix, self.dist_coeffs = self._load_camera_intrinsics(intrinsic_file)
        log.info("\n已加载相机内参：")
        log.info("\n相机内参矩阵:")
        log.info(self.camera_matrix)
        log.info("\n畸变系数:")
        log.info(self.dist_coeffs)
        print(self.camera_matrix)
        print(self.dist_coeffs)

        # Configure and start the RealSense pipeline
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
        self.config.enable_stream(rs.stream.depth, 320, 240, rs.format.z16, 30)
        self.profile = self.pipeline.start(self.config)

        # Align depth frames to the color stream
        self.align = rs.align(rs.stream.color)

        # Pick/place coordinates produced externally (by the Qwen model);
        # stored in pixel, camera-frame and base-frame variants.
        self.pick_point_pixel = None
        self.place_point_pixel = None
        self.pick_point_3d_camera = None
        self.place_point_3d_camera = None
        self.pick_point_3d_base = None
        self.place_point_3d_base = None

    @staticmethod
    def _load_calibration(calib_dir: Path) -> np.ndarray:
        """Load the hand-eye calibration matrix from cali_matrix.json."""
        calib_file = Path(calib_dir) / "cali_matrix.json"
        with open(calib_file, "r") as f:
            data = json.load(f)
        return np.array(data)

    @staticmethod
    def _load_camera_intrinsics(intrinsic_file: Path) -> Tuple[np.ndarray, np.ndarray]:
        """Load (camera_matrix, dist_coeffs) from an .npz calibration file."""
        print(f"load camera intrinsics from {intrinsic_file}")
        data = np.load(intrinsic_file)
        return data["camera_matrix"], data["dist_coeffs"]

    def capture_current_frame(self):
        """Return the current color image (H x W x C, BGR) aligned to color."""
        frames = self.pipeline.wait_for_frames(self.wait_timeout)
        aligned_frames = self.align.process(frames)

        color_frame = aligned_frames.get_color_frame()
        color_image = np.asanyarray(color_frame.get_data())
        return color_image  # (HxWxC)

    def annotate_image(self, color_image, pixel_xy, text: str, color: Tuple[int, int, int]):
        """Draw a filled circle and a text label at `pixel_xy` (in place).

        Returns the same image object for call chaining.
        """
        x, y = pixel_xy
        cv2.circle(color_image, pixel_xy, 5, color, -1)
        cv2.putText(color_image, text, (x - 20, y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
        return color_image

    def save_current_image(self, filename: str = "imgs/current_view.jpg") -> str:
        """Capture the current color frame, write it to `filename`, return the path."""
        color_image = self.capture_current_frame()

        cv2.imwrite(filename, color_image)
        # Bug fix: this log line previously printed the literal "(unknown)".
        log.info(f"Save current frame to: {filename}")
        return filename

    def save_annotated_image(self,
                             pick_point_pixel: Tuple[int, int],
                             place_point_pixel: Tuple[int, int],
                             save_file: str = "imgs/annotated_current_view.jpg",
                             src_file: str = None):
        """Annotate PICK (green) and PLACE (blue) points and save the image.

        If `src_file` is None a fresh frame is captured, otherwise the image
        is read from disk. Either point may be falsy to skip its annotation.
        """
        if src_file is None:
            color_image = self.capture_current_frame()
        else:
            color_image = cv2.imread(src_file)

        # Reuse annotate_image so drawing style stays consistent in one place.
        if pick_point_pixel:
            self.annotate_image(color_image, pick_point_pixel, "PICK", (0, 255, 0))

        if place_point_pixel:
            self.annotate_image(color_image, place_point_pixel, "PLACE", (255, 0, 0))

        cv2.imwrite(save_file, color_image)
        log.info(f"Saved annotated image to: {save_file}")

    def get_3d_point_from_pixel(self, x: int, y: int) -> Optional[np.ndarray]:
        """Deproject pixel (x, y) to a 3D point in the camera frame.

        Units follow rs2_deproject_pixel_to_point with get_distance depth
        (meters, per the RealSense API). Returns None when frames or depth
        are unavailable.
        """
        # Fetch a frame set aligned to the color stream
        frames = self.pipeline.wait_for_frames(self.wait_timeout)
        aligned_frames = self.align.process(frames)

        depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()

        if not depth_frame or not color_frame:
            print("无法获取帧")
            return None

        # Depth at the requested pixel; 0 means no valid measurement
        depth_value = depth_frame.get_distance(x, y)
        if depth_value == 0:
            print("无法获取有效深度值")
            return None

        # Intrinsics of the (aligned) depth stream
        intrinsics = depth_frame.profile.as_video_stream_profile().get_intrinsics()

        # Back-project the pixel into 3D camera coordinates
        point_3d = rs.rs2_deproject_pixel_to_point(intrinsics, [x, y], depth_value)
        return np.array(point_3d)
    
class ORBCamera:
    """Orbbec camera wrapper built on pyorbbecsdk.

    Provides stream setup, color-frame capture, pixel -> 3D deprojection using
    pinhole intrinsics, image saving helpers, and an interactive viewer.
    """

    def __init__(self, model: str = "L515"):
        """Initialize the Orbbec camera: save dirs, SDK objects, streams."""
        # Directories for images saved from the viewer ('s' key)
        self.save_color_dir = os.path.join(os.getcwd(), "cali_imgs/color_images")
        self.save_depth_dir = os.path.join(os.getcwd(), "cali_imgs/depth_images")
        self.save_aligned_dir = os.path.join(os.getcwd(), "cali_imgs/aligned_images")

        for dir_path in [self.save_color_dir, self.save_depth_dir, self.save_aligned_dir]:
            os.makedirs(dir_path, exist_ok=True)

        # SDK pipeline / device handles
        self.pipeline = Pipeline()
        self.device = self.pipeline.get_device()
        self.device_info = self.device.get_device_info()
        self.device_pid = self.device_info.get_pid()
        self.config = Config()
        self.model = model

        # Temporal filter used to denoise depth images in the viewer
        self.temporal_filter = TemporalFilter(alpha=0.5)

        # Valid depth range, in millimeters
        self.MIN_DEPTH = 20    # 20mm
        self.MAX_DEPTH = 10000 # 10000mm

        # Default pinhole intrinsics from an offline calibration; width/height
        # (and cy) are overwritten in _setup_streams, fx/fy/cx/cy may be
        # overwritten from the device in _get_camera_intrinsics.
        self.camera_intrinsics = {
            'fx': 498.6755291,
            'fy': 498.42915853,
            'cx': 320.39465977,
            'cy': 193.75127948,
            'width': 640,
            'height': 400,
            # distortion coefficients from the same calibration
            'dist_coeffs': [0.12690944, -0.1238994, -0.01381218, 0.00034364, -0.21525641]
        }

        # Running state flag guarded by start()/stop()
        self.is_running = False

        self._setup_streams()

    def _setup_streams(self):
        """Configure color/depth streams, alignment mode and frame sync."""
        try:
            # Color stream: prefer a 640-wide RGB@30 profile, fall back to default
            color_profile_list = self.pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
            if color_profile_list:
                try:
                    color_profile = color_profile_list.get_video_stream_profile(640, 0, OBFormat.RGB, 30)
                except OBError:
                    color_profile = color_profile_list.get_default_video_stream_profile()
                self.config.enable_stream(color_profile)
                self.has_color_sensor = True
                print(f"Color profile: {color_profile.get_width()}x{color_profile.get_height()}@{color_profile.get_fps()}")
            else:
                self.has_color_sensor = False
                print("No color sensor found")

            # Depth stream: use the device's default profile
            depth_profile_list = self.pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
            if depth_profile_list:
                depth_profile = depth_profile_list.get_default_video_stream_profile()
                self.config.enable_stream(depth_profile)
                self.has_depth_sensor = True
                print(f"Depth profile: {depth_profile.get_width()}x{depth_profile.get_height()}@{depth_profile.get_fps()}")

                # Update intrinsics with the actual depth stream geometry.
                # NOTE(review): cy is overwritten with height // 2 while cx
                # keeps its calibrated value — looks inconsistent; confirm
                # whether this is intentional.
                self.camera_intrinsics['width'] = depth_profile.get_width()
                self.camera_intrinsics['height'] = depth_profile.get_height()
                self.camera_intrinsics['cy'] = depth_profile.get_height() // 2

            else:
                self.has_depth_sensor = False
                print("No depth sensor found")

            # Alignment mode: PID 0x066B (Femto Mega per original author) needs
            # software alignment; other devices use hardware alignment.
            if self.device_pid == 0x066B:
                self.config.set_align_mode(OBAlignMode.SW_MODE)
            else:
                self.config.set_align_mode(OBAlignMode.HW_MODE)

            # Enable frame sync when both sensors are present (best effort)
            if self.has_color_sensor and self.has_depth_sensor:
                try:
                    self.pipeline.enable_frame_sync()
                    print("Frame sync enabled")
                except Exception as e:
                    print(f"Frame sync failed: {e}")

        except Exception as e:
            print(f"Setup streams failed: {e}")
            raise

    def start(self):
        """Start the pipeline, refresh intrinsics, and let the camera settle."""
        try:
            self.pipeline.start(self.config)
            self.is_running = True
            print("Camera started successfully")

            # Try to read the real intrinsics from the device
            self._get_camera_intrinsics()

            # Give the sensor a moment to stabilize before first capture
            time.sleep(1)

        except Exception as e:
            print(f"Failed to start camera: {e}")
            raise

    def _get_camera_intrinsics(self):
        """Overwrite default intrinsics with device-reported values, if available."""
        try:
            # Not all SDK versions/devices expose get_camera_param
            if hasattr(self.device, 'get_camera_param'):
                camera_param = self.device.get_camera_param()
                if camera_param:
                    # Use the depth-camera intrinsics (deprojection uses depth pixels)
                    depth_intrinsic = camera_param.depth_intrinsic
                    self.camera_intrinsics.update({
                        'fx': depth_intrinsic.fx,
                        'fy': depth_intrinsic.fy,
                        'cx': depth_intrinsic.cx,
                        'cy': depth_intrinsic.cy
                    })
                    print("Successfully loaded camera intrinsics from device:")
                    print(f"fx: {depth_intrinsic.fx}, fy: {depth_intrinsic.fy}")
                    print(f"cx: {depth_intrinsic.cx}, cy: {depth_intrinsic.cy}")
                else:
                    print("Camera parameters not available, using default values")
            else:
                print("Device does not support get_camera_param, using default intrinsics")
                print(f"Using default intrinsics: fx={self.camera_intrinsics['fx']}, fy={self.camera_intrinsics['fy']}")
                print(f"cx={self.camera_intrinsics['cx']}, cy={self.camera_intrinsics['cy']}")
        except Exception as e:
            print(f"Failed to get camera intrinsics: {e}")
            print("Using default intrinsics")

    def stop(self):
        """Stop the pipeline if it is running (best effort)."""
        try:
            if self.is_running:
                self.pipeline.stop()
                self.is_running = False
                print("Camera stopped")
        except Exception as e:
            print(f"Failed to stop camera: {e}")

    def get_3d_point_from_pixel(self, x: int, y: int) -> Optional[np.ndarray]:
        """Deproject pixel (x, y) to a 3D point in the camera frame, in meters.

        Reads the raw uint16 depth image (values in millimeters), validates
        the pixel and depth range, then applies the pinhole model with the
        intrinsics stored in self.camera_intrinsics. Returns None on failure.
        """
        if not self.is_running:
            print("相机未启动")
            return None

        try:
            frames = self.pipeline.wait_for_frames(1000)
            if frames is None:
                print("无法获取帧")
                return None

            depth_frame = frames.get_depth_frame()
            if not depth_frame:
                print("无法获取深度帧")
                return None

            width = depth_frame.get_width()
            height = depth_frame.get_height()
            # Depth scale is assumed to be 1.0 (raw values are millimeters).

            if x < 0 or x >= width or y < 0 or y >= height:
                print(f"像素坐标 ({x}, {y}) 超出图像范围 ({width}x{height})")
                return None

            depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((height, width))
            depth_mm = depth_data[y, x]  # millimeters

            if depth_mm <= self.MIN_DEPTH or depth_mm >= self.MAX_DEPTH:
                print(f"深度值 {depth_mm}mm 超出有效范围 ({self.MIN_DEPTH}-{self.MAX_DEPTH}mm)")
                return None

            # Convert depth to meters
            z = depth_mm / 1000.0

            fx = self.camera_intrinsics['fx']
            fy = self.camera_intrinsics['fy']
            cx = self.camera_intrinsics['cx']
            cy = self.camera_intrinsics['cy']

            # Pinhole back-projection
            x_3d = (x - cx) * z / fx
            y_3d = (y - cy) * z / fy

            point_3d = np.array([x_3d, y_3d, z])
            print(f"像素坐标 ({x},{y}) -> 3D坐标: ({x_3d:.3f}, {y_3d:.3f}, {z:.3f}) 米 (深度 {depth_mm}mm)")
            return point_3d

        except Exception as e:
            print(f"获取3D坐标时出错: {e}")
            return None

    def capture_current_frame(self):
        """Capture the current color frame as an OpenCV BGR image.

        Retries up to 5 times on transient failures; returns None when no
        usable frame could be obtained.
        """
        if not self.is_running:
            print("相机未启动")
            return None

        retry = 0
        max_retries = 5  # retry a few times on transient frame failures

        while retry < max_retries:
            try:
                frames = self.pipeline.wait_for_frames(1000)  # 1s timeout
                if frames is None:
                    print(f"获取帧失败，重试 {retry + 1}/{max_retries}")
                    retry += 1
                    continue

                color_frame = frames.get_color_frame()
                if color_frame is None:
                    print(f"获取彩色帧失败，重试 {retry + 1}/{max_retries}")
                    retry += 1
                    continue

                # Convert SDK frame to a BGR numpy image
                color_image = frame_to_bgr_image(color_frame)
                if color_image is None:
                    print(f"图像转换失败，重试 {retry + 1}/{max_retries}")
                    retry += 1
                    continue

                return color_image  # success

            except Exception as e:
                print(f"捕获帧时出错: {e}")
                retry += 1
                time.sleep(0.1)

        print("捕获彩色帧失败，请检查相机连接或驱动")
        return None

    def save_current_image(self, save_dir=".cache/onhand_imgs/"):
        """Capture the current frame and save it to disk.

        :param save_dir: directory the image is written into
        :return: the saved image path, or None on failure
        """
        # Create the save directory if needed
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        # Capture the current color frame
        color_image = self.capture_current_frame()
        if color_image is None:
            print("无法获取图像，保存失败")
            return None

        # Fixed filename: each capture overwrites the previous snapshot.
        # (The original comment claimed a timestamp is used — it is not.)
        filename = "current_view.jpg"
        save_path = os.path.join(save_dir, filename)

        try:
            cv2.imwrite(save_path, color_image)
            print(f"图像已保存至: {save_path}")
            return save_path
        except Exception as e:
            print(f"图像保存失败: {e}")
            return None

    def save_annotated_image(self,
                             pick_point_pixel: Tuple[int, int],
                             place_point_pixel: Tuple[int, int],
                             save_file: str = ".cache/onhand_imgs/annotated_current_view.jpg",
                             src_file: str = None):
        """Annotate PICK (green) and PLACE (blue) points and save the image.

        If `src_file` is None a fresh frame is captured, otherwise the image
        is read from disk. Either point may be falsy to skip its annotation.
        """
        if src_file is None:
            color_image = self.capture_current_frame()
        else:
            color_image = cv2.imread(src_file)

        if pick_point_pixel:
            pick_x, pick_y = pick_point_pixel
            # annotate PICK point in green color
            cv2.circle(color_image, pick_point_pixel, 5, (0, 255, 0), -1)
            cv2.putText(color_image, "PICK", (pick_x - 20, pick_y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        if place_point_pixel:
            place_x, place_y = place_point_pixel
            # annotate PLACE point in blue color
            cv2.circle(color_image, place_point_pixel, 5, (255, 0, 0), -1)
            cv2.putText(color_image, "PLACE", (place_x - 20, place_y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)

        cv2.imwrite(save_file, color_image)
        log.info(f"Saved annotated image to: {save_file}")

    def save_color_frame(self, color_frame, timestamp=None):
        """Save a color frame as PNG; returns the file path or None."""
        if timestamp is None:
            timestamp = int(time.time() * 1000)

        color_image = frame_to_bgr_image(color_frame)
        if color_image is not None:
            filename = os.path.join(self.save_color_dir,
                                  f"color_{color_frame.get_width()}x{color_frame.get_height()}_{timestamp}.png")
            cv2.imwrite(filename, color_image)
            # Bug fix: this line previously printed the literal "(unknown)".
            print(f"Color image saved: {filename}")
            return filename
        return None

    def save_depth_frame(self, depth_frame, timestamp=None):
        """Save a depth frame as raw uint16 data plus a colorized PNG.

        Returns (raw_filename, vis_filename).
        """
        if timestamp is None:
            timestamp = int(time.time() * 1000)

        width = depth_frame.get_width()
        height = depth_frame.get_height()
        scale = depth_frame.get_depth_scale()

        # Raw depth: uint16 buffer scaled by the device depth scale
        depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
        depth_data = depth_data.reshape((height, width))
        depth_data = (depth_data * scale).astype(np.uint16)

        raw_filename = os.path.join(self.save_depth_dir,
                                   f"depth_{width}x{height}_{timestamp}.raw")
        depth_data.tofile(raw_filename)

        # Visualization: clamp to the valid range, normalize, apply JET colormap
        depth_data_float = depth_data.astype(np.float32)
        depth_data_float = np.where((depth_data_float > self.MIN_DEPTH) &
                                   (depth_data_float < self.MAX_DEPTH), depth_data_float, 0)

        depth_image = cv2.normalize(depth_data_float, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        depth_image_colored = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)

        vis_filename = os.path.join(self.save_depth_dir,
                                   f"depth_vis_{width}x{height}_{timestamp}.png")
        cv2.imwrite(vis_filename, depth_image_colored)

        print(f"Depth data saved: {raw_filename}")
        print(f"Depth visualization saved: {vis_filename}")
        return raw_filename, vis_filename

    def save_aligned_frame(self, color_image, depth_image, timestamp=None):
        """Save a 50/50 blend of the color and (colorized) depth images."""
        if timestamp is None:
            timestamp = int(time.time() * 1000)

        if color_image is not None and depth_image is not None:
            # Overlay the depth visualization on the color image
            aligned_image = cv2.addWeighted(color_image, 0.5, depth_image, 0.5, 0)

            filename = os.path.join(self.save_aligned_dir, f"aligned_{timestamp}.png")
            cv2.imwrite(filename, aligned_image)
            # Bug fix: this line previously printed the literal "(unknown)".
            print(f"Aligned image saved: {filename}")
            return filename
        return None

    def run_viewer(self):
        """Run interactive color/depth/aligned viewer windows.

        Mouse clicks print the 3D coordinates of the clicked pixel; keyboard
        controls toggle windows, save frames, or quit (see printed help).
        """
        print("Starting camera viewer...")
        print("Controls:")
        print("  'q' or ESC - Quit")
        print("  's' - Save current frames")
        print("  'c' - Toggle color window")
        print("  'd' - Toggle depth window")
        print("  'a' - Toggle aligned window")
        print("  Click on image - Get 3D coordinates")

        self.start()

        show_color = True
        show_depth = True
        show_aligned = True

        last_print_time = time.time()
        frame_count = 0

        # Mouse callback: deproject the clicked pixel and report timing
        def mouse_callback(event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONDOWN:
                print(f"Clicked at pixel: ({x}, {y})")
                start_time = time.time()
                point_3d = self.get_3d_point_from_pixel(x, y)
                end_time = time.time()
                if point_3d is not None:
                    print(f"3D coordinates: {point_3d}")
                    print(f"get_3d_point_from_pixel took: {(end_time - start_time) * 1000:.2f} ms")
                else:
                    print("Failed to get 3D coordinates.")

        try:
            while True:
                frames = self.pipeline.wait_for_frames(100)
                if frames is None:
                    continue

                color_frame = None
                depth_frame = None
                color_image = None
                depth_image_colored = None

                # Color frame -> BGR image
                if self.has_color_sensor:
                    color_frame = frames.get_color_frame()
                    if color_frame is not None:
                        color_image = frame_to_bgr_image(color_frame)

                # Depth frame -> filtered, colorized visualization
                if self.has_depth_sensor:
                    depth_frame = frames.get_depth_frame()
                    if depth_frame is not None:
                        width = depth_frame.get_width()
                        height = depth_frame.get_height()
                        # Depth data is uint16 in millimeters
                        depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16).reshape((height, width))
                        # Zero out values outside the valid range
                        depth_data = np.where((depth_data > self.MIN_DEPTH) & (depth_data < self.MAX_DEPTH), depth_data, 0)
                        # Temporal smoothing against flicker
                        depth_data = self.temporal_filter.process(depth_data)
                        # Normalize and colorize for display
                        depth_image = cv2.normalize(depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
                        depth_image_colored = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
                        # Print the center-pixel distance at most once a second
                        center_y, center_x = height // 2, width // 2
                        center_distance = depth_data[center_y, center_x]
                        current_time = time.time()
                        if current_time - last_print_time >= 1.0:
                            print(f"Frame: {frame_count}, Center distance: {center_distance} mm")
                            last_print_time = current_time

                # Show the enabled windows, (re)attaching the mouse callback
                if show_color and color_image is not None:
                    cv2.imshow("Color Viewer", color_image)
                    cv2.setMouseCallback("Color Viewer", mouse_callback)
                if show_depth and depth_image_colored is not None:
                    cv2.imshow("Depth Viewer", depth_image_colored)
                    cv2.setMouseCallback("Depth Viewer", mouse_callback)
                if show_aligned and color_image is not None and depth_image_colored is not None:
                    # Resize the depth visualization to match the color image
                    if color_image.shape[:2] != depth_image_colored.shape[:2]:
                        depth_image_colored_resized = cv2.resize(
                            depth_image_colored, (color_image.shape[1], color_image.shape[0])
                        )
                    else:
                        depth_image_colored_resized = depth_image_colored
                    aligned_image = cv2.addWeighted(color_image, 0.5, depth_image_colored_resized, 0.5, 0)
                    cv2.imshow("Aligned Viewer", aligned_image)
                    cv2.setMouseCallback("Aligned Viewer", mouse_callback)

                # Keyboard handling
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q') or key == 27:
                    break
                elif key == ord('s'):
                    timestamp = int(time.time() * 1000)
                    if color_frame is not None:
                        self.save_color_frame(color_frame, timestamp)
                    if depth_frame is not None:
                        self.save_depth_frame(depth_frame, timestamp)
                    if color_image is not None and depth_image_colored is not None:
                        self.save_aligned_frame(color_image, depth_image_colored, timestamp)
                elif key == ord('c'):
                    show_color = not show_color
                    if not show_color:
                        cv2.destroyWindow("Color Viewer")
                elif key == ord('d'):
                    show_depth = not show_depth
                    if not show_depth:
                        cv2.destroyWindow("Depth Viewer")
                elif key == ord('a'):
                    show_aligned = not show_aligned
                    if not show_aligned:
                        cv2.destroyWindow("Aligned Viewer")

                frame_count += 1

        except KeyboardInterrupt:
            print("Interrupted by user")
        finally:
            self.stop()
            cv2.destroyAllWindows()



class TemporalFilter:
    """Exponential temporal smoothing for depth images.

    Each processed frame is blended with the previously emitted one:
    ``out = alpha * frame + (1 - alpha) * previous``. The blended result
    (not the raw input) becomes the new history.
    """

    def __init__(self, alpha):
        # Blend weight for the incoming frame (0..1); history gets 1 - alpha.
        self.alpha = alpha
        self.previous_frame = None

    def process(self, frame):
        """Return the smoothed frame and remember it for the next call."""
        if self.previous_frame is None:
            # First frame: nothing to blend with yet, pass it through.
            self.previous_frame = frame
            return frame
        blended = cv2.addWeighted(frame, self.alpha, self.previous_frame, 1 - self.alpha, 0)
        self.previous_frame = blended
        return blended

def test_3d_point_performance():
    """Benchmark ORBCamera.get_3d_point_from_pixel over a set of pixels.

    Prints per-point timing plus summary statistics (success count,
    average/min/max latency in milliseconds).
    """
    print("=== 测试 get_3d_point_from_pixel 性能 ===")

    camera = None  # so the finally block is safe if construction fails
    try:
        camera = ORBCamera()
        camera.start()
        # Bug fix: removed a leftover `import pdb; pdb.set_trace()` breakpoint.

        # Let the camera settle before measuring
        time.sleep(2)

        # Test pixels around the image center
        test_points = [
            (320, 200),  # center
            (300, 180),
            (340, 220),
            (280, 160),
            (360, 240),
            (320, 150),
            (320, 250),
            (250, 200),
            (390, 200),
            (320, 100)
        ]

        print(f"测试 {len(test_points)} 个点的3D坐标获取性能...")

        times = []
        successful_points = 0

        for i, (x, y) in enumerate(test_points):
            print(f"\n测试点 {i+1}/{len(test_points)}: ({x}, {y})")

            start_time = time.time()
            point_3d = camera.get_3d_point_from_pixel(x, y)
            end_time = time.time()

            elapsed_time = (end_time - start_time) * 1000  # milliseconds
            times.append(elapsed_time)

            if point_3d is not None:
                successful_points += 1
                print(f"3D坐标: ({point_3d[0]:.3f}, {point_3d[1]:.3f}, {point_3d[2]:.3f}) 米")
            else:
                print("获取3D坐标失败")
            print(f"耗时: {elapsed_time:.2f} ms")

        avg_time = np.mean(times) if times else 0
        print("\n=== 性能统计 ===")
        print(f"成功点数: {successful_points}/{len(test_points)}")
        print(f"平均耗时: {avg_time:.2f} ms")
        print(f"单次最小耗时: {np.min(times):.2f} ms")
        print(f"单次最大耗时: {np.max(times):.2f} ms")

    except Exception as e:
        print(f"性能测试出错: {e}")
    finally:
        # Bug fix: guard against `camera` never being assigned when
        # ORBCamera() itself raises.
        if camera is not None:
            try:
                camera.stop()
            except Exception:
                pass

def main():
    """Entry point: construct the Orbbec camera.

    Manual options (enable as needed):
      - test_3d_point_performance(): latency benchmark for pixel -> 3D lookups
      - camera.run_viewer(): interactive color/depth/aligned viewer
    """
    camera = ORBCamera()
    # camera.run_viewer()


if __name__ == "__main__":
    main()
