#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@CreateTime: 2025/10/9 10:52
@Author  : AnimateX
@Contact : animatex@163.com
@File    : rmsl_viewer_refactor.py
@License : Copyright © 2025 AnimateX. All rights reserved.
@version : rmsl_viewer_refactor_v2025_10_09

--------------------------------------------------------------

@Description:
    [Update History]: 2.0 Version
        [2025-10-14]: v2.0.0 Dual/Triple UVC support (new version)
        [2025-10-20]: V2.0.1 fix exit bug and optimize performance analysis
        [2025-10-21]: V2.0.2 hide the terminal popup windows during startup
        [2025-11-13]: refactored the UI, adding resolution selection and frame-rate display
        [2025-11-14]: obtain the calibration file
"""

from PyQt5 import QtWidgets, QtCore
from PyQt5.QtGui import QPixmap, QImage, QPainter, QIcon, QColor
from PyQt5.QtCore import QObject, QTimer, Qt, pyqtSignal, QEvent, QThread
from PyQt5.QtWidgets import (QWidget, QGraphicsScene, QGraphicsView, QApplication, QGraphicsPixmapItem, QToolTip, QMessageBox)
import re
import os
import cv2
import sys
import time
import json
import ctypes
from ctypes import *
import struct
import subprocess
import numpy as np
from pathlib import Path
from collections import deque
from queue import Queue, Empty, Full
from datetime import datetime
from dataclasses import dataclass
from threading import Thread, Lock, Event
from typing import Tuple, Dict, Optional, Any, List

from viewer_new_ui import Ui_RMSLViewer

from pyqtgraph.opengl import GLViewWidget
import pyqtgraph.opengl as gl
import pyqtgraph as pg


def get_project_root():
    """Return the directory containing this script.

    Falls back to sys.argv[0] when __file__ is not defined
    (e.g. frozen or embedded interpreters).
    """
    source = __file__ if "__file__" in globals() else sys.argv[0]
    return Path(source).resolve().parent


PROJECT_ROOT = get_project_root()
# Log file written next to the script.
RMSL_LOG_PATH = PROJECT_ROOT / "viewer.log"
# Bundled ffmpeg binary (used at least for probing UVC stream options).
FFMPEG = str(PROJECT_ROOT / "ffmpeg/ffmpeg.exe")
# Native helper DLL driving the UVC extension-unit control channel.
UVCController_DLL_PATH = str(PROJECT_ROOT / "UVCController.dll")


""" ------------------------------------------ [日志管理器] ------------------------------------------ """
class Logger(QObject):
    """Qt-aware logger: emits every message via a signal and optionally
    appends it to a log file with per-line timestamps."""
    log_signal = pyqtSignal(str)

    def __init__(self, log_file_path=None):
        super().__init__()
        self.log_file_path = log_file_path
        # File logging is active only when a path was supplied.
        self.file_logging_enabled = log_file_path is not None
        # True when the next write starts a fresh line (controls timestamping).
        self._current_line_start = True

    def _write_to_file(self, msg, with_newline=True, with_timestamp=True):
        """Append *msg* to the log file. Best-effort: failures are reported
        to stderr but never raised (logging must not crash the app)."""
        if not self.file_logging_enabled:
            return

        try:
            with open(self.log_file_path, 'a', encoding='utf-8') as f:
                # Timestamp only at the start of a line, so partial writes
                # (with_newline=False) continue the previous line cleanly.
                if with_timestamp and self._current_line_start:
                    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f"[{timestamp}] ")

                f.write(msg)

                if with_newline:
                    f.write('\n')
                    self._current_line_start = True
                else:
                    self._current_line_start = False

                f.flush()
        except Exception as e:
            # BUGFIX: the original built this message into a local variable and
            # discarded it, silently swallowing the failure. Report it instead.
            print(f" [Error] Can not open log file. Error info: {e}", file=sys.stderr)

    def log(self, msg):
        """Emit *msg* to UI listeners and append it to the log file."""
        self.log_signal.emit(msg)
        self._write_to_file(msg, with_newline=True)

    def log_to_file(self, msg):
        """Append *msg* to the log file only (no UI signal)."""
        self._write_to_file(msg, with_newline=True)


# Process-wide Logger singleton; created lazily by init_logger().
logger = None


def init_logger(log_file_path=None):
    """Return the process-wide Logger, creating it on first use.

    The path argument is honoured only by the call that actually creates
    the singleton; subsequent calls ignore it.
    """
    global logger
    if logger is not None:
        return logger
    logger = Logger(log_file_path)
    return logger


""" ------------------------------------------ [数据容器] ------------------------------------------ """
@dataclass
class FrameData:
    """A single captured frame plus its acquisition metadata."""
    timestamp: float   # capture time of this frame
    frame_id: int      # frame index within its stream
    label: str         # stream label — presumably identifies the UVC source; confirm at producers
    data: np.ndarray   # raw frame pixels

@dataclass
class StreamConfig:
    """Configuration for one UVC capture stream."""
    uvc_name: str       # device name of the UVC source
    width: int          # frame width in pixels
    height: int         # frame height in pixels
    fps: int            # requested frame rate
    pixel_format: str   # pixel format / codec identifier
    queue_size: int = 5  # depth of the stream's bounded frame queue

@dataclass
class MatchedDataPack:
    """One timestamp-matched bundle of frames (for display / saving).

    Any field may be None when the corresponding stream is absent.
    """
    timestamp: float
    rgb: Optional[np.ndarray] = None
    disp: Optional[np.ndarray] = None
    depth: Optional[np.ndarray] = None
    depth_color: Optional[np.ndarray] = None
    pointcloud: Optional[np.ndarray] = None
    ir_left: Optional[np.ndarray] = None
    ir_right: Optional[np.ndarray] = None
    time_diff: float = 0.0  # RGB/depth timestamp gap of this match

""" ------------------------------------------ [有界队列管理器] ------------------------------------------ """
class BoundedQueue:
    """Thread-safe bounded queue with a configurable drop policy and
    drop/skip statistics."""

    def __init__(self, maxsize=2, name=""):
        self.queue = Queue(maxsize=maxsize)
        self.name = name
        self._lock = Lock()  # guards the counters and the closed flag
        self._closed = False

        # Statistics.
        self.put_drop_count = 0  # frames discarded during put()
        self.get_skip_count = 0  # frames skipped by get_latest()
        self.total_put = 0
        self.total_get = 0

    def put(self, item, drop_old=True):
        """Enqueue *item*; returns True on success.

        When the queue is full: drop_old=True evicts the oldest entry to
        make room, drop_old=False rejects the new item instead.
        """
        if self._closed:
            return False

        with self._lock:
            if self._closed:  # re-check under the lock
                return False

            self.total_put += 1

            if self.queue.full():
                if not drop_old:
                    self.put_drop_count += 1
                    return False
                try:
                    self.queue.get_nowait()
                except Empty:
                    pass
                else:
                    self.put_drop_count += 1

            try:
                self.queue.put_nowait(item)
            except Full:
                return False
            return True

    def get_latest(self, timeout=0.01):
        """Wait up to *timeout* for one item, then drain the queue and
        return only the newest entry; older entries count as skipped."""
        if self._closed:
            return None

        try:
            newest = self.queue.get(timeout=timeout)
        except Empty:
            return None

        skipped = 0
        while True:
            try:
                newest = self.queue.get_nowait()
            except Empty:
                break
            skipped += 1

        with self._lock:
            self.total_get += 1
            self.get_skip_count += skipped

        return newest

    def get(self, timeout=None):
        """FIFO get; returns None when closed or on timeout."""
        if self._closed:
            return None

        try:
            item = self.queue.get(timeout=timeout)
        except Empty:
            return None
        with self._lock:
            self.total_get += 1
        return item

    def qsize(self):
        return self.queue.qsize()

    def clear(self):
        """Discard everything currently queued."""
        with self._lock:
            while True:
                try:
                    self.queue.get_nowait()
                except Empty:
                    break

    def close(self):
        """Mark the queue closed, give producers a moment to notice,
        then drain any leftovers."""
        self._closed = True
        time.sleep(0.05)
        self.clear()

    def reset(self):
        """Reopen the queue and zero all statistics (used on restart)."""
        self.clear()
        with self._lock:
            self._closed = False
            self.put_drop_count = 0
            self.get_skip_count = 0
            self.total_put = 0
            self.total_get = 0

    @property
    def is_closed(self):
        return self._closed

    def get_stats(self):
        """Snapshot of the counters plus a derived overall drop rate."""
        with self._lock:
            dropped = self.put_drop_count + self.get_skip_count
            return {
                'name': self.name,
                'qsize': self.queue.qsize(),
                'put_drops': self.put_drop_count,
                'get_skips': self.get_skip_count,
                'total_drops': dropped,
                'total_put': self.total_put,
                'total_get': self.total_get,
                'drop_rate': dropped / self.total_put if self.total_put > 0 else 0
            }

""" ------------------------------------------ [MessageBox美化] ------------------------------------------ """
# Qt stylesheet applied to QMessageBox dialogs: light background, blue
# flat buttons with hover/pressed shades, YaHei/Consolas font stack.
MessageBox_Style = """
    QMessageBox {
        background-color: #F5F5F5;
        font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
        font-size: 10px;
        padding: 10px;
        spacing: 8px;
    }
    QMessageBox QLabel {
        color: #333333;
        font-size: 12px;
        min-width: 0px;
        margin-left: 0px;
        padding-left: 0px;
    }
    QMessageBox::icon-label {
        min-width: 32px;
        min-height: 32px;
        margin-right: 0px;
        padding: 0px;
        qproperty-alignment: AlignCenter;
    }
    QMessageBox QPushButton {
        background-color: #4A90E2;
        font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
        font-size: 12px;
        color: white;
        border-radius: 4px;
        padding: 5px 15px;
        outline: none;
        border: none;
        min-width: 80px;
        min-height: 25px;
    }
    QMessageBox QPushButton:hover {
        background-color: #5A9AE8;
    }
    QMessageBox QPushButton:pressed {
        background-color: #3A80D2;
    }
"""

""" ------------------------------------------ [UVCFormat枚举器] ------------------------------------------ """
# Enumerates every resolution / frame-rate combination the camera supports.
class UVCFormatEnumerator:
    """Lists a UVC camera's supported formats via ffmpeg's dshow option dump."""

    def __init__(self, device_name: str):
        self.device_name = device_name
        self.logger = init_logger(RMSL_LOG_PATH)

    def get_supported_formats(self) -> List[Dict]:
        """Run `ffmpeg -list_options` for the device and return the parsed
        format dicts; returns [] on any failure."""
        cmd = [
            FFMPEG,
            '-list_options', 'true',
            '-f', 'dshow',
            '-i', f'video={self.device_name}'
        ]

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                encoding='utf-8',
                errors='ignore',
                timeout=15  # option listing is quick; don't hang forever if ffmpeg stalls
            )

            # ffmpeg writes device information to stderr, not stdout.
            return self._parse_formats(result.stderr)

        except Exception as e:
            self.logger.log_to_file(f"Error: {e}")
            return []

    def _parse_formats(self, output: str) -> List[Dict]:
        """Parse ffmpeg's -list_options output into format dicts.

        Example lines:
            [dshow @ 000001] pixel_format=yuyv422  min s=640x480   fps=30 max s=1920x1080 fps=30
            [dshow @ 000001]       vcodec=mjpeg    min s=1920x1080 fps=30 max s=1920x1080 fps=30
        """
        formats = []
        current_pixel_format = None

        for line in output.split('\n'):
            # Raw pixel format (e.g. yuyv422).
            pixel_match = re.search(r'pixel_format=(\w+)', line)
            if pixel_match:
                current_pixel_format = pixel_match.group(1)

            # Compressed codec (e.g. mjpeg) — overrides the pixel format.
            codec_match = re.search(r'vcodec=(\w+)', line)
            if codec_match:
                current_pixel_format = codec_match.group(1)

            if current_pixel_format is None:
                continue

            # Form 1: "min s=640x480   fps=30 max s=1920x1080 fps=30".
            # BUGFIX: ffmpeg pads with a variable number of spaces, so the
            # separators must be \s+ — the previous single-space pattern
            # never matched real output.
            range_match = re.search(
                r'min s=(\d+)x(\d+)\s+fps=([\d.]+)\s+max s=(\d+)x(\d+)\s+fps=([\d.]+)',
                line
            )
            if range_match:
                min_w, min_h, min_fps, max_w, max_h, max_fps = range_match.groups()
                formats.append({
                    'format': current_pixel_format.upper(),
                    'width': int(max_w),
                    'height': int(max_h),
                    'fps': float(max_fps),
                    'min_width': int(min_w),
                    'min_height': int(min_h),
                    'min_fps': float(min_fps)
                })
                # BUGFIX: skip the simple pattern for this line — it would
                # also hit the "min s=..." portion and record a spurious
                # entry at the minimum resolution.
                continue

            # Form 2: "s=1920x1080 fps=30"
            simple_match = re.search(r's=(\d+)x(\d+)\s+fps=([\d.]+)', line)
            if simple_match:
                w, h, fps = simple_match.groups()
                formats.append({
                    'format': current_pixel_format.upper(),
                    'width': int(w),
                    'height': int(h),
                    'fps': float(fps)
                })

        # Remove duplicate entries (same format/size/fps reported twice).
        formats = UVCFormatEnumerator.remove_duplicates(formats)
        self.logger.log_to_file(f"Success parser UVCFormatEnum")

        return formats

    @staticmethod
    def remove_duplicates(resolutions):
        """Drop entries sharing (format, width, height, fps); the first
        occurrence of each key is kept."""
        unique_dict = {}
        for res in resolutions:
            key = (res['format'], res['width'], res['height'], res['fps'])
            if key not in unique_dict:
                unique_dict[key] = res
        return list(unique_dict.values())

""" ------------------------------------------ [CalibData Getter] ------------------------------------------ """
# Element counts of the fixed-size c_double arrays in the calibration structs.
MAT3_SIZE = 9     # 3x3 matrix
MAT4_SIZE = 16    # 4x4 matrix
VEC3_SIZE = 3     # 3-vector
VEC12_SIZE = 12   # 3x4 matrix / 12-element vector
VEC14_SIZE = 14   # 14-term distortion vector

class StereoCameraParams(Structure):
    """Stereo camera calibration parameters (ctypes mirror of the DLL struct).

    Field order and array sizes must match the native layout in
    UVCController.dll exactly — do not reorder.
    """
    _fields_ = [
        # IR camera resolutions (left / right)
        ("l_width", c_int),
        ("l_height", c_int),
        ("r_width", c_int),
        ("r_height", c_int),

        # Camera intrinsic matrices (3x3)
        ("K_l", c_double * MAT3_SIZE),
        ("K_r", c_double * MAT3_SIZE),

        # Distortion coefficients
        ("D_l", c_double * VEC14_SIZE),
        ("D_r", c_double * VEC14_SIZE),

        # Stereo rectification extrinsics
        ("R", c_double * MAT3_SIZE),  # rotation matrix
        ("T", c_double * VEC3_SIZE),  # translation vector

        ("R1", c_double * MAT3_SIZE),  # left-camera rectifying rotation
        ("R2", c_double * MAT3_SIZE),  # right-camera rectifying rotation

        # Projection matrices (3x4)
        ("P1", c_double * VEC12_SIZE),
        ("P2", c_double * VEC12_SIZE),

        # Disparity-to-depth reprojection matrix (4x4)
        ("Q", c_double * MAT4_SIZE),

        # Inverse projection-rotation matrices (3x3)
        ("invPR1", c_double * MAT3_SIZE),
        ("invPR2", c_double * MAT3_SIZE),

        # Calibration RMS errors
        ("stereo_rms", c_double),
        ("left_rms", c_double),
        ("right_rms", c_double),

        # RGB camera parameters
        ("rgb_width", c_int),
        ("rgb_height", c_int),
        ("K_rgb", c_double * MAT3_SIZE),
        ("D_rgb", c_double * VEC12_SIZE),

        # IR -> RGB transform parameters
        ("R_ir2rgb", c_double * MAT3_SIZE),
        ("T_ir2rgb", c_double * VEC3_SIZE),

        ("R1_ir2rgb", c_double * MAT3_SIZE),
        ("R2_ir2rgb", c_double * MAT3_SIZE),

        ("P1_ir2rgb", c_double * VEC12_SIZE),
        ("P2_ir2rgb", c_double * VEC12_SIZE),

        ("Q_ir2rgb", c_double * MAT4_SIZE),

        # RGB calibration RMS errors
        ("rgb_rms", c_double),
        ("ir_rgb_stereo_rms", c_double),
    ]

class UVCStereoCameraParams(Structure):
    """Calibration payload as transferred over the UVC extension unit.

    Packed (_pack_ = 1) so the byte layout matches the wire format.
    """
    _pack_ = 1
    _fields_ = [
        ("param_size", c_uint),      # payload size — presumably bytes of `params`; confirm against DLL
        ("param_checkSum", c_uint),  # checksum validated by the DLL on request
        ("params", StereoCameraParams),
    ]

class RMSLUVCController:
    """ctypes wrapper around UVCController.dll for the RMSL camera's
    UVC extension-unit control channel."""

    # Error codes returned by the DLL.
    UVC_SUCCESS = 0
    UVC_ERROR_DEVICE_NOT_FOUND = -1
    UVC_ERROR_NO_EXTENSION_UNIT = -2
    UVC_ERROR_CONTROL_FAILED = -3
    UVC_ERROR_INVALID_PARAM = -4
    UVC_ERROR_CHECKSUM_FAILED = -5

    def __init__(self):
        """Load the controller DLL and declare its function signatures."""
        self.dll = ctypes.CDLL(UVCController_DLL_PATH)
        self._setup_functions()

    def _setup_functions(self):
        """Declare argtypes/restype for every DLL entry point used here."""
        # UVC_InitDevice(device_index) -> error code
        self.dll.UVC_InitDevice.argtypes = [c_int]
        self.dll.UVC_InitDevice.restype = c_int

        # UVC_ReleaseDevice()
        self.dll.UVC_ReleaseDevice.argtypes = []
        self.dll.UVC_ReleaseDevice.restype = None

        # UVC_GetCalibrationData(buffer, size, verify_checksum) -> error code
        self.dll.UVC_GetCalibrationData.argtypes = [c_void_p, c_int, c_int]
        self.dll.UVC_GetCalibrationData.restype = c_int

        # UVC_GetVersion(buffer, size) -> error code
        self.dll.UVC_GetVersion.argtypes = [c_char_p, c_int]
        self.dll.UVC_GetVersion.restype = c_int

        # UVC_GetDeviceName(buffer, size) -> error code
        self.dll.UVC_GetDeviceName.argtypes = [c_char_p, c_int]
        self.dll.UVC_GetDeviceName.restype = c_int

        # UVC_EnumerateDevices(buffer, size) -> device count
        self.dll.UVC_EnumerateDevices.argtypes = [c_char_p, c_int]
        self.dll.UVC_EnumerateDevices.restype = c_int

        # UVC_GetCalibrationDataSize() -> size in bytes
        self.dll.UVC_GetCalibrationDataSize.argtypes = []
        self.dll.UVC_GetCalibrationDataSize.restype = c_int

    def enumerate_devices(self):
        """Enumerate Rockchip devices; returns (index, name) tuples parsed
        from the DLL's newline-separated "index|name" listing."""
        buffer = create_string_buffer(4096)
        count = self.dll.UVC_EnumerateDevices(buffer, 4096)

        if count <= 0:
            return []

        devices = []
        for line in buffer.value.decode('utf-8').strip().split('\n'):
            if '|' in line:
                idx, name = line.split('|', 1)
                devices.append((int(idx), name))

        return devices

    def init_device(self, device_index=0):
        """Open the device at *device_index*; raises RuntimeError on failure."""
        ret = self.dll.UVC_InitDevice(device_index)

        error_msgs = {
            self.UVC_ERROR_DEVICE_NOT_FOUND: "RMSL Device not found (VID:2207 PID:001A)",
            self.UVC_ERROR_NO_EXTENSION_UNIT: "Can not found extend unit",
        }

        if ret != self.UVC_SUCCESS:
            msg = error_msgs.get(ret, f"Unknown error: {ret}")
            raise RuntimeError(f"Failed initial: {msg}")

    def release(self):
        """Release the device handle."""
        self.dll.UVC_ReleaseDevice()

    def get_calibration_data(self, verify_checksum=True):
        """Read the calibration struct from the device.

        Returns a UVCStereoCameraParams; raises RuntimeError when the DLL
        reports a negative status code.
        """
        params = UVCStereoCameraParams()
        size = sizeof(params)

        ret = self.dll.UVC_GetCalibrationData(
            byref(params),
            size,
            1 if verify_checksum else 0
        )

        if ret < 0:
            error_msgs = {
                self.UVC_ERROR_CONTROL_FAILED: "Device communication failed",
                self.UVC_ERROR_CHECKSUM_FAILED: "Failed verifying checksum",
            }
            msg = error_msgs.get(ret, f"Error code: {ret}")
            raise RuntimeError(f"Failed to get calib-data: {msg}")

        return params

    def get_device_name(self):
        """Return the device name, or "Unknown Device" on error."""
        buffer = create_string_buffer(256)
        ret = self.dll.UVC_GetDeviceName(buffer, 256)

        if ret != self.UVC_SUCCESS:
            return "Unknown Device"

        return buffer.value.decode('utf-8')

    def get_version(self):
        """Return the SDK version string reported by the DLL (return code
        is not checked — the raw buffer contents are returned)."""
        buffer = create_string_buffer(256)
        self.dll.UVC_GetVersion(buffer, 256)

        return buffer.value.decode('utf-8')

    def __enter__(self):
        # Context-manager support: `with RMSLUVCController() as ctl: ...`
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the device, even when the body raised.
        self.release()


class CalibrationParser:
    """Converts a raw StereoCameraParams struct into a plain-dict calib_info."""

    # Distortion coefficient names in storage order (indices 0..11).
    _DIST_NAMES = ('k1', 'k2', 'p1', 'p2', 'k3', 'k4',
                   'k5', 'k6', 's1', 's2', 's3', 's4')

    def __init__(self, params: StereoCameraParams):
        self.params = params

    @staticmethod
    def _array_to_matrix(arr, shape):
        """Reshape a flat c_double array into a numpy matrix of *shape*."""
        return np.array(arr).reshape(shape)

    def get_all_camera_intrinsic(self):
        """Build the calib_info dict: left-IR intrinsics/distortion,
        rectification data, and — when the device carries a valid RGB
        calibration — RGB intrinsics plus the IR→RGB alignment coefficients.
        """
        to_mat = CalibrationParser._array_to_matrix
        p = self.params

        calib_info = {'l_width': p.l_width, 'l_height': p.l_height}

        # Left (depth/IR) camera intrinsics.
        K_l = to_mat(p.K_l, (3, 3))
        calib_info['l_intrinsic'] = {
            'fx': float(K_l[0, 0]), 'fy': float(K_l[1, 1]),
            'cx': float(K_l[0, 2]), 'cy': float(K_l[1, 2]),
            'K_l': K_l,
        }

        # Left-camera distortion (full 14-term array kept under 'D_l').
        D_l = np.array(p.D_l)
        l_dist = {name: float(D_l[i]) for i, name in enumerate(self._DIST_NAMES)}
        l_dist['D_l'] = D_l
        calib_info['l_distortion'] = l_dist

        calib_info['R1'] = to_mat(p.R1, (3, 3))
        calib_info['P1'] = to_mat(p.P1, (3, 4))

        # Only the three Q entries used downstream are extracted.
        Q = to_mat(p.Q, (4, 4))
        calib_info['q23'] = Q[2, 3]
        calib_info['q32'] = Q[3, 2]
        calib_info['q33'] = Q[3, 3]

        calib_info['rgb_width'] = p.rgb_width
        calib_info['rgb_height'] = p.rgb_height

        K_rgb = to_mat(p.K_rgb, (3, 3))
        if K_rgb[0, 0] > 0:  # fx > 0 is treated as "RGB calibration present"
            calib_info['rgb_intrinsic'] = {
                'fx': float(K_rgb[0, 0]), 'fy': float(K_rgb[1, 1]),
                'cx': float(K_rgb[0, 2]), 'cy': float(K_rgb[1, 2]),
                'K': K_rgb,
            }

            D_rgb = np.array(p.D_rgb[:12])  # VEC12_SIZE
            rgb_dist = {name: float(D_rgb[i]) for i, name in enumerate(self._DIST_NAMES)}
            rgb_dist['D'] = D_rgb
            calib_info['rgb_distortion'] = rgb_dist

            R_ir2rgb = to_mat(p.R_ir2rgb, (3, 3))
            invPR1 = to_mat(p.invPR1, (3, 3))
            T_ir2rgb = to_mat(p.T_ir2rgb, (3, 1))
            calib_info['T_ir2rgb'] = T_ir2rgb
            calib_info['R_ir2rgb'] = R_ir2rgb
            calib_info['invPR1'] = invPR1

            # Flattened projection of rectified-IR points into RGB pixels.
            R_proj = K_rgb @ R_ir2rgb @ invPR1
            calib_info['r_coeff'] = {
                f'r{3 * row + col}': float(R_proj[row, col])
                for row in range(3) for col in range(3)
            }

            T_proj = K_rgb @ T_ir2rgb
            calib_info['t_coeff'] = {f't{i}': float(T_proj[i, 0]) for i in range(3)}

            calib_info['category'] = 'rgbd'
        else:
            # No usable RGB calibration on the device.
            calib_info['rgb_intrinsic'] = None
            calib_info['rgb_distortion'] = None
            calib_info['r_coeff'] = None
            calib_info['t_coeff'] = None
            calib_info['category'] = '2rgb'

        return calib_info


class CalibrationScaler:
    """Rescales calibration intrinsics when the stream resolution changes."""

    @staticmethod
    def scale_intrinsic(calib_info, target_width, target_height, camera_type='depth'):
        """Scale camera intrinsics in *calib_info* to a new resolution.

        Args:
            calib_info: calibration dict (mutated in place and returned).
            target_width: target width in pixels.
            target_height: target height in pixels.
            camera_type: 'depth'/'disp' scales the left-IR camera,
                'rgb' scales the RGB camera.

        Returns:
            The same calib_info dict, updated.
        """
        if camera_type in ('depth', 'disp'):
            # Left IR / depth camera.
            sx = target_width / calib_info['l_width']
            sy = target_height / calib_info['l_height']

            K = calib_info['l_intrinsic']['K_l'].copy()
            K[0, 0] *= sx  # fx
            K[0, 2] *= sx  # cx
            K[1, 1] *= sy  # fy
            K[1, 2] *= sy  # cy

            calib_info['l_width'] = target_width
            calib_info['l_height'] = target_height
            calib_info['l_intrinsic'] = {
                'fx': float(K[0, 0]), 'fy': float(K[1, 1]),
                'cx': float(K[0, 2]), 'cy': float(K[1, 2]),
                'K_l': K,
            }

            # The projection matrix scales row-wise with the image axes.
            P1 = calib_info['P1'].copy()
            P1[0, :] *= sx
            P1[1, :] *= sy
            calib_info['P1'] = P1

        elif camera_type == 'rgb':
            if calib_info['rgb_intrinsic'] is None:
                return calib_info  # nothing to scale

            sx = target_width / calib_info['rgb_width']
            sy = target_height / calib_info['rgb_height']

            K = calib_info['rgb_intrinsic']['K'].copy()
            K[0, 0] *= sx  # fx
            K[0, 2] *= sx  # cx
            K[1, 1] *= sy  # fy
            K[1, 2] *= sy  # cy

            calib_info['rgb_width'] = target_width
            calib_info['rgb_height'] = target_height
            calib_info['rgb_intrinsic'] = {
                'fx': float(K[0, 0]), 'fy': float(K[1, 1]),
                'cx': float(K[0, 2]), 'cy': float(K[1, 2]),
                'K': K,
            }

        return calib_info

    @staticmethod
    def update_alignment_coeffs(calib_info):
        """Recompute r_coeff / t_coeff after the RGB intrinsics changed.

        No-op when RGB calibration or the IR→RGB extrinsics are absent;
        always returns the (possibly updated) calib_info dict.
        """
        if calib_info['rgb_intrinsic'] is None:
            return calib_info

        if 'R_ir2rgb' not in calib_info or 'invPR1' not in calib_info:
            return calib_info

        K_rgb = calib_info['rgb_intrinsic']['K']
        R_proj = K_rgb @ calib_info['R_ir2rgb'] @ calib_info['invPR1']
        T_proj = K_rgb @ calib_info['T_ir2rgb']

        calib_info['r_coeff'] = {
            f'r{3 * row + col}': float(R_proj[row, col])
            for row in range(3) for col in range(3)
        }
        calib_info['t_coeff'] = {f't{i}': float(T_proj[i, 0]) for i in range(3)}

        return calib_info

""" ------------------------------------------ [RGB/Depth匹配器] ------------------------------------------ """
class SensorDataMatcher:
    """Pairs RGB frames with depth frames by timestamp.

    The newest depth frame is the reference; the closest RGB frame captured
    at or before it (within max_time_diff) is selected as its match.
    """

    def __init__(self, max_time_diff=200, max_queue_size=20):
        self.rgb_buffer = deque(maxlen=max_queue_size)
        self.depth_buffer = deque(maxlen=max_queue_size)
        self.max_time_diff = max_time_diff  # max RGB/depth gap — presumably ms; confirm at call sites
        self.max_queue_size = max_queue_size
        self.lock = Lock()
        self.logger = init_logger(RMSL_LOG_PATH)

        # Statistics.
        self.matched_count = 0
        self.rgb_dropped = 0
        self.depth_dropped = 0

        # Set whenever a new depth frame arrives; cleared after each attempt
        # so the same depth frame is not matched twice.
        self.depth_updated = False

    def add_rgb_data(self, img_rgb, timestamp):
        """Buffer one RGB frame with its timestamp."""
        with self.lock:
            # BUGFIX: deque(maxlen=...) evicts the oldest entry automatically,
            # so the old "len > maxsize -> popleft" branch could never fire and
            # the drop counter stayed at 0. Count the eviction before appending.
            if len(self.rgb_buffer) == self.max_queue_size:
                self.rgb_dropped += 1
            self.rgb_buffer.append({'img': img_rgb, 'timestamp': timestamp})

    def add_depth_data(self, img_disp, img_depth, timestamp, pse_color_depth_img=None):
        """Buffer one disparity/depth frame (optionally with a pseudo-color render)."""
        with self.lock:
            # Same eviction-counting fix as add_rgb_data.
            if len(self.depth_buffer) == self.max_queue_size:
                self.depth_dropped += 1
            self.depth_buffer.append({
                'disp': img_disp,
                'depth': img_depth,
                'pse_color': pse_color_depth_img,
                'timestamp': timestamp
            })
            self.depth_updated = True

    def find_best_match(self):
        """Try to pair the newest depth frame with an RGB frame.

        Returns (rgb_data, depth_data) dicts on success, (None, None) otherwise.
        """
        rgb, depth, throttle = self._match_locked()
        if throttle:
            # BUGFIX: the original slept while holding self.lock, which stalled
            # the producer threads; pace the caller's polling loop outside it.
            time.sleep(0.01)
        return rgb, depth

    def _match_locked(self):
        """Matching core; returns (rgb, depth, throttle)."""
        with self.lock:
            if not self.rgb_buffer or not self.depth_buffer:
                self.logger.log_to_file(f" ⚠️ [Warn] Frame buffer empty!")
                return None, None, True

            if not self.depth_updated:
                # Nothing new since the last attempt.
                return None, None, True

            latest_depth = self.depth_buffer[-1]
            target_timestamp = latest_depth['timestamp']

            best_rgb = None
            best_rgb_index = -1
            min_time_diff = float('inf')

            # Scan newest -> oldest; only RGB frames strictly before the depth
            # frame qualify.
            for i in range(len(self.rgb_buffer) - 1, -1, -1):
                rgb_timestamp = self.rgb_buffer[i]['timestamp']
                if rgb_timestamp >= target_timestamp:
                    continue  # RGB newer than the depth frame

                time_diff = target_timestamp - rgb_timestamp
                if time_diff > self.max_time_diff:
                    break  # older frames are only further away

                if time_diff < min_time_diff:
                    min_time_diff = time_diff
                    best_rgb = self.rgb_buffer[i]
                    best_rgb_index = i
                    if time_diff < 5:
                        break  # close enough; stop searching

            if best_rgb is not None and min_time_diff <= self.max_time_diff:
                # Consume the matched pair.
                matched_depth = self.depth_buffer.pop()
                del self.rgb_buffer[best_rgb_index]

                # Purge RGB frames older than the one just consumed.
                while self.rgb_buffer and self.rgb_buffer[0]['timestamp'] < best_rgb['timestamp']:
                    self.rgb_buffer.popleft()

                self.matched_count += 1
                self.depth_updated = False
                return best_rgb, matched_depth, False

            # No match: discard a badly stale depth frame so the buffer
            # cannot jam on unmatched data.
            if self.depth_buffer:
                oldest_depth_ts = self.depth_buffer[0]['timestamp']
                if target_timestamp - oldest_depth_ts > self.max_time_diff * 4:
                    self.depth_buffer.popleft()

            self.depth_updated = False
            return None, None, False

    def clear(self):
        """Drop all buffered frames."""
        with self.lock:
            self.rgb_buffer.clear()
            self.depth_buffer.clear()

    def get_stats(self):
        """Snapshot of match/drop statistics."""
        with self.lock:
            return {
                'matched': self.matched_count,
                'rgb_buffered': len(self.rgb_buffer),
                'depth_buffered': len(self.depth_buffer),
                'rgb_dropped': self.rgb_dropped,
                'depth_dropped': self.depth_dropped
            }

""" ------------------------------------------ [AllStream管理器] ------------------------------------------ """
class StreamManager:
    """Coordinates all capture streams (RGB / Depth / IR).

    Owns the FFmpeg capture threads and their bounded raw-frame queues, the
    RGB<->Depth timestamp matcher, the pre-allocated UI display buffers, and a
    lock-protected statistics dictionary shared with the capture threads.
    """

    def __init__(self, uvc_configs: dict, en_rgb_pointcloud: bool = True):
        self.logger = init_logger(RMSL_LOG_PATH)
        self.uvc_configs = uvc_configs
        self.en_rgb_pointcloud = en_rgb_pointcloud

        """ ==================================== Queue ==================================== """
        self.raw_rgb_queue = BoundedQueue(maxsize=5, name="raw_rgb")
        self.raw_depth_queue = BoundedQueue(maxsize=5, name="raw_depth")
        self.raw_ir_queue = BoundedQueue(maxsize=2, name="raw_ir")

        """ ==================================== CaptureThread ==================================== """
        self.rgb_thread: Optional[FFmpegCaptureThread] = None
        self.depth_thread: Optional[FFmpegCaptureThread] = None
        self.ir_thread: Optional[FFmpegCaptureThread] = None

        self.stream_started = False
        """ ==================================== FrameMatchThread ==================================== """
        # Timestamp matcher (fed by the processing thread, maintained by the main thread).
        self.matcher = SensorDataMatcher(max_time_diff=200, max_queue_size=20)

        # UI display cache (written by the processing thread, read by the UI thread).
        self.ui_rgb = None
        self.ui_depth = None
        self.ui_depth_color = None
        self.ui_pointcloud = None
        self.ui_ir_left = None
        self.ui_ir_right = None
        self.ui_disp_diff = None
        self.ui_lock = Lock()

        # Save packet
        self.save_data = MatchedDataPack(0)

        # Runtime flags
        self.running = False
        self.mode_3d = False
        self.debug_mode = False

        # True once _preallocate_buffers() has run for the current configuration.
        self.allocated_buffer = False

        """ ==================================== Camera UVC Config ==================================== """
        # TODO: if enable debug mode, rgb stream resolution change to: 1080x1280
        self.stream_configs = {
            'rgb': StreamConfig(
                uvc_name=self.uvc_configs['rgb']['name'],
                width=self.uvc_configs['rgb']['width'],
                height=self.uvc_configs['rgb']['height'],
                fps=self.uvc_configs['rgb']['fps'],
                pixel_format=self.uvc_configs['rgb']['format'].lower()
            ),
            'depth': StreamConfig(
                uvc_name=self.uvc_configs['depth']['name'],
                width=self.uvc_configs['depth']['width'],
                height=self.uvc_configs['depth']['height'],
                fps=self.uvc_configs['depth']['fps'],
                pixel_format=self.uvc_configs['depth']['format'].lower()
            ),
            'ir': StreamConfig(
                uvc_name=self.uvc_configs['ir']['name'],
                width=self.uvc_configs['ir']['width'],
                height=self.uvc_configs['ir']['height'],
                fps=self.uvc_configs['ir']['fps'],
                pixel_format=self.uvc_configs['ir']['format'].lower()
            )
        }

        """ ==================================== Stream Enable ==================================== """
        self.stream_enabled = {
            'rgb': True,  # enabled by default
            'depth': True,  # enabled by default
            'ir': False  # disabled by default; turned on in DEBUG mode
        }

        """ ==================================== Status Tracking ==================================== """
        # Shared statistics (use floats consistently; formatted with %.2f in _print_stats).
        self.stats = {
            'rgb_fps': 0.0,
            'depth_fps': 0.0,
            'ir_fps': 0.0,
            'rgb_drop_rate': 0.0,
            'rgb_repeat_rate': 0.0,
            'depth_drop_rate': 0.0,
            'depth_repeat_rate': 0.0,
            'ir_drop_rate': 0.0,
            'ir_repeat_rate': 0.0,
            'process_rgb_fps': 0.0,
            'process_depth_fps': 0.0,
            'process_ir_fps': 0.0,
            'process_pointcloud_fps': 0.0,
            'matched_count': 0
        }
        self.stats_lock = Lock()

    def _preallocate_buffers(self):
        """Pre-allocate every UI image buffer for the current stream configs."""
        rgb_cfg = self.stream_configs['rgb']
        depth_cfg = self.stream_configs['depth']
        ir_cfg = self.stream_configs['ir']

        # RGB buffer
        self.ui_rgb = np.zeros(
            (rgb_cfg.height, rgb_cfg.width, 3),
            dtype=np.uint8
        )

        # Depth buffers: raw 16-bit values plus colorized preview.
        self.ui_depth = np.zeros(
            (depth_cfg.height, depth_cfg.width),
            dtype=np.uint16
        )
        self.ui_depth_color = np.zeros(
            (depth_cfg.height, depth_cfg.width, 3),
            dtype=np.uint8
        )

        # Point cloud is sparse/dynamic, so it is not pre-allocated.
        self.ui_pointcloud = None

        # IR buffers are only needed in DEBUG mode.
        if self.debug_mode:
            self.ui_ir_left = np.zeros(
                (ir_cfg.height, ir_cfg.width, 3),
                dtype=np.uint8
            )
            self.ui_ir_right = np.zeros(
                (ir_cfg.height, ir_cfg.width, 3),
                dtype=np.uint8
            )
            self.ui_disp_diff = np.zeros(
                (ir_cfg.height, ir_cfg.width, 3),
                dtype=np.uint8
            )
        else:
            self.ui_ir_left = None
            self.ui_ir_right = None
            self.ui_disp_diff = None

    """ ==================================== Config API ==================================== """
    def set_debug_mode(self, enabled: bool):
        """Set DEBUG mode (automatically enables/disables the IR stream).

        Must be called while the manager is stopped; refused otherwise.
        """
        if self.running:
            self.logger.log_to_file(" ⚠️ [Warning] Cannot change debug mode while running, please restart RMSLViewer tool.")
            return

        self.debug_mode = enabled
        self.stream_enabled['ir'] = enabled
        self.logger.log_to_file(f" 🔧 [Config] Debug mode: {enabled}, IR stream: {enabled}")

        if enabled:
            # TODO: FIXED SIZE, 2025-11-17
            self.update_stream_config('rgb', width= 1080, height=1280)

        # Re-allocate buffers so the IR buffers match the new mode.
        self.logger.log_to_file(f" 🔧 [Config] Re-allocate buffers!")
        self._preallocate_buffers()
        self.allocated_buffer = True

    def enable_stream(self, stream_type: str, enabled: bool):
        """Toggle a single stream on/off (takes effect at the next start())."""
        if stream_type in self.stream_enabled:
            self.stream_enabled[stream_type] = enabled
            self.logger.log_to_file(f" 🔧 [Config] Stream '{stream_type}': {enabled}")

    def update_stream_config(self, stream_type: str, **kwargs):
        """Update fields of a stream's config (width/height/fps/...) and re-allocate buffers."""
        if stream_type in self.stream_configs:
            config = self.stream_configs[stream_type]
            for key, value in kwargs.items():
                if hasattr(config, key):
                    setattr(config, key, value)
            self.logger.log_to_file(f" 🔧 [Config] Updated '{stream_type}' config: {kwargs}")

            # Re-allocate buffers to match the possibly-changed resolution.
            self.logger.log_to_file(f" 🔧 [Config] '{stream_type}' Resolution change, Re-allocate buffers!")
            self._preallocate_buffers()
            self.allocated_buffer = True

    """ ==================================== ALL Stream Manage ==================================== """

    def start(self):
        """Start all enabled streams. Returns True if every stream started.

        Start order: RGB --> DEPTH --> IR (optional, DEBUG mode only).
        """
        if self.running:
            self.logger.log_to_file(" ⚠️ [Warning] StreamManager already running")
            return False

        """ ==================================== PreAllocate Memory ==================================== """
        if not self.allocated_buffer:
            self._preallocate_buffers()
            # BUG FIX: was `= False`, which forced a re-allocation on every start().
            self.allocated_buffer = True

        self.logger.log_to_file(" 🚀 [Info] Starting StreamManager...")
        self.running = True

        # Reset the raw frame queues before the capture threads begin producing.
        self.raw_rgb_queue.reset()
        self.raw_depth_queue.reset()
        self.raw_ir_queue.reset()

        success = True

        # 1. RGB
        if self.stream_enabled['rgb']:
            success &= self._start_stream('rgb')
            time.sleep(0.2)

        # 2. DEPTH
        if self.stream_enabled['depth']:
            success &= self._start_stream('depth')
            time.sleep(0.2)

        # 3. IR (DEBUG mode only)
        if self.stream_enabled['ir']:
            success &= self._start_stream('ir')
            time.sleep(0.2)

        self.stream_started = True

        if not success:
            self.logger.log_to_file(" 💥 [Error] Failed to start all streams!")
            self.stop()
            return False

        self.logger.log_to_file(" ✅ [Info] StreamManager started successfully")
        return True

    def stop(self):
        """Stop all streams in reverse start order: IR -> DEPTH -> RGB."""
        if not self.running:
            return

        self.logger.log_to_file(" 🎉 [Info] Stopping StreamManager...")
        self.running = False
        self.stream_started = False

        # Stop streams in reverse order.
        if self.ir_thread:
            self._stop_stream('ir')
            time.sleep(0.3)

        if self.depth_thread:
            self._stop_stream('depth')
            time.sleep(0.3)

        if self.rgb_thread:
            self._stop_stream('rgb')
            time.sleep(0.3)

        # Close the queues so blocked consumers wake up.
        self.raw_rgb_queue.close()
        self.raw_depth_queue.close()
        self.raw_ir_queue.close()

        self.logger.log_to_file(" ✅ [Info] StreamManager stopped")
        self._print_stats()

    def _start_stream(self, stream_type: str) -> bool:
        """Start a single capture stream; returns True on success."""
        # Safety: if a previous thread for this stream is still alive, stop it
        # first (now logged consistently for all three stream types).
        existing = getattr(self, f'{stream_type}_thread')
        if existing and existing.is_alive():
            self.logger.log_to_file(f" ⚠️ [Warning] {stream_type} thread still running, stopping first...")
            self._stop_stream(stream_type)
            time.sleep(0.5)  # give the old FFmpeg process time to fully exit

        config = self.stream_configs[stream_type]
        queue_map = {
            'rgb': self.raw_rgb_queue,
            'depth': self.raw_depth_queue,
            'ir': self.raw_ir_queue
        }

        thread = FFmpegCaptureThread(
            stream_mgr=self,
            output_queue=queue_map[stream_type],
            uvc_cam_name=config.uvc_name,
            width=config.width,
            height=config.height,
            fps=config.fps,
            pixel_format=config.pixel_format
        )

        thread.start()

        # Wait until FFmpeg has actually begun producing frames.
        if not thread.wait_until_started(timeout=10):
            self.logger.log_to_file(f" 💥 [Error] Failed to start {stream_type} stream")
            thread.stop()
            return False

        # Keep the thread reference for stop()/restart handling.
        setattr(self, f'{stream_type}_thread', thread)
        return True

    def _stop_stream(self, stream_type: str):
        """Stop a single capture stream and drop its thread reference."""
        thread = getattr(self, f'{stream_type}_thread', None)
        if thread:
            thread.stop(timeout=2)
            setattr(self, f'{stream_type}_thread', None)

    """ ==================================== UI DATA Interface ==================================== """

    def update_ui_data(self, **kwargs):
        """Update the UI display cache; keys map to the ui_* attributes."""
        with self.ui_lock:
            for key, value in kwargs.items():
                attr_name = f'ui_{key}'
                if hasattr(self, attr_name):
                    buffer = getattr(self, attr_name)
                    # Point clouds are assigned directly (not pre-allocated).
                    if key == 'pointcloud':
                        setattr(self, attr_name, value)
                    # Other images are copied into their pre-allocated buffer.
                    elif value is not None and buffer is not None:
                        try:
                            np.copyto(buffer, value)
                        except ValueError as e:
                            # Shape mismatch: fall back to direct assignment.
                            self.logger.log_to_file(
                                f" ⚠️ [Warning] Shape mismatch for {key}, "
                                f"falling back to direct assignment: {e}"
                            )
                            setattr(self, attr_name, value)
                    else:
                        setattr(self, attr_name, value)

    def get_ui_data(self) -> dict:
        """Return the display cache for the UI thread (references, not copies)."""
        with self.ui_lock:
            return {
                'rgb': self.ui_rgb,
                'depth': self.ui_depth,
                'depth_color': self.ui_depth_color,
                'pointcloud': self.ui_pointcloud,
                'ir_left': self.ui_ir_left,
                'ir_right': self.ui_ir_right,
                'disp_diff': self.ui_disp_diff
            }

    """ ==================================== Stats Tracking ==================================== """
    def update_stats(self, key: str, value):
        """Thread-safe update of a single statistics entry (unknown keys ignored)."""
        with self.stats_lock:
            if key in self.stats:
                self.stats[key] = value

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of all statistics, including queue and matcher stats."""
        with self.stats_lock:
            stats = self.stats.copy()

            # Per-queue drop statistics.
            stats['queues'] = {
                'rgb': self.raw_rgb_queue.get_stats(),
                'depth': self.raw_depth_queue.get_stats(),
                'ir': self.raw_ir_queue.get_stats()
            }

            # RGB<->Depth matcher statistics.
            stats['matcher'] = self.matcher.get_stats()

            return stats

    def _print_stats(self):
        """Write a formatted statistics summary to the log file."""
        stats = self.get_stats()
        self.logger.log_to_file("=" * 80)
        self.logger.log_to_file(" 📊 StreamManager Statistics:")
        self.logger.log_to_file(f"  [RGB  ] Capture FPS: {stats['rgb_fps']:.2f}, Post-Process FPS: {stats['process_rgb_fps']:.2f}")
        self.logger.log_to_file(f"  [DEPTH] Capture FPS: {stats['depth_fps']:.2f}, Post-Process FPS: {stats['process_depth_fps']:.2f}")
        self.logger.log_to_file(f"  [IR   ] Capture FPS: {stats['ir_fps']:.2f}")
        self.logger.log_to_file(f"  [PC   ] Post-Process FPS: {stats['process_pointcloud_fps']:.2f}")
        self.logger.log_to_file(f"  [RGB  ] Drop Rate: {stats['rgb_drop_rate']:.2f}%, Repeat Rate: {stats['rgb_repeat_rate']:.2f}%")
        self.logger.log_to_file(f"  [DEPTH] Drop Rate: {stats['depth_drop_rate']:.2f}%, Repeat Rate: {stats['depth_repeat_rate']:.2f}%")
        self.logger.log_to_file(f"  [IR   ] Drop Rate: {stats['ir_drop_rate']:.2f}%, Repeat Rate: {stats['ir_repeat_rate']:.2f}%")
        self.logger.log_to_file(f"  [DEPTH & RGB Matched Count]: {stats['matcher']['matched']}")

        self.logger.log_to_file("  " + "=" * 78)
        for name, queue_stats in stats['queues'].items():
            self.logger.log_to_file(f"  Queue '{name}': drops={queue_stats['total_drops']}, "
                                    f"rate={queue_stats['drop_rate']:.2%}")
        self.logger.log_to_file("=" * 80)

""" ------------------------------------------ [Capturer管理器] ------------------------------------------ """
class FFmpegCaptureThread(Thread):
    """Captures one UVC stream via an FFmpeg subprocess and pushes parsed
    frames (with the 28-byte vendor header decoded) into a bounded queue.

    Also performs duplicate-frame rejection, adaptive frame-drop detection
    (learned from the first N frame intervals) and FPS statistics reporting
    back to the owning StreamManager.
    """

    def __init__(self,
                 stream_mgr: StreamManager,
                 output_queue: BoundedQueue,
                 uvc_cam_name: str = 'RMSL321_RGB',
                 width: int = 1280,
                 height: int = 1080,
                 fps: int = 30,
                 pixel_format: str = 'yuyv422'):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self.output_queue = output_queue
        self.uvc_cam_name = uvc_cam_name
        # e.g. 'RMSL321_RGB' -> 'rgb'. BUG FIX: fall back to the full name
        # instead of raising IndexError when the name contains no underscore.
        name_parts = uvc_cam_name.split('_')
        self.stream_type = (name_parts[1] if len(name_parts) > 1 else uvc_cam_name).lower()
        self.width = width
        self.height = height
        self.fps = fps
        self.pixel_format = pixel_format

        self._stop_event = Event()  # set to request shutdown
        self._started_event = Event()  # set once FFmpeg is confirmed running

        self.process = None
        self.pipe = None
        self.frame_count = 0
        self.frame_size = self._calculate_frame_size()

        # Pre-allocated raw frame buffer; readinto() avoids per-frame allocation.
        self.raw_buffer = bytearray(self.frame_size)
        self.raw_view = memoryview(self.raw_buffer)

        # Processing buffers.
        # NOTE(review): these three appear unused by _process_frame (which works
        # on views of raw_buffer); kept for compatibility with external readers.
        self.yuyv_buffer = np.zeros((self.height, self.width), dtype=np.uint16)
        self.nv12_buffer = np.zeros((self.height * 3 // 2, self.width), dtype=np.uint8)
        self.rgb_buffer = np.zeros((self.height, self.width, 3), dtype=np.uint8)

        # Vendor frame header: 6 x uint16 + uint32 + uint64 + uint32 = 28 bytes.
        self.header_size = 28
        self.header_struct = struct.Struct('<HHHHHH I Q I')

        self.ffmpeg_command = [
            FFMPEG,
            '-f', 'dshow',
            '-rtbufsize', '64M',
            '-video_size', f'{self.width}x{self.height}',
            '-framerate', str(self.fps),
            '-pixel_format', self.pixel_format,
            '-i', f'video={self.uvc_cam_name}',
            '-c:v', 'rawvideo',
            '-f', 'image2pipe',
            '-'
        ]

        # Hide the console window that Windows would otherwise pop up.
        if sys.platform == 'win32':
            self.creation_flags = subprocess.CREATE_NO_WINDOW  # 0x08000000
        else:
            self.creation_flags = 0

        # Duplicate rejection state.
        self.repeat_cnt = 0
        self.last_ts = -1  # last timestamp (ms) — rgb/depth streams
        self.last_type = -1  # last frame type — ir streams (L/R share a timestamp)

        # Performance reporting.
        self.stat_interval = 30  # seconds between stats pushes
        self.fps_counter = 0
        self.fps_start_time = time.time()

        # Adaptive frame-drop detection: learn the normal interval first.
        self.learning_phase = True
        self.learning_samples = []  # frame intervals (ms) collected while learning
        self.learning_sample_size = 50  # samples needed to finish learning

        # Statistics computed once learning completes.
        self.avg_frame_interval = None  # mean interval (ms)
        self.std_frame_interval = None  # interval standard deviation (ms)
        self.max_normal_interval = None  # largest interval still considered normal (ms)

        # Estimated number of dropped frames.
        self.dropped_frames_count = 0

    def _calculate_frame_size(self):
        """Return the raw byte size of one frame for the configured pixel format."""
        format_sizes = {
            'yuyv422': self.width * self.height * 2,
            'bgr24': self.width * self.height * 3,
            'rgb24': self.width * self.height * 3,
            'nv12': self.width * self.height * 3 // 2,
        }
        # Unknown formats fall back to 2 bytes/pixel (yuyv422-sized).
        return format_sizes.get(self.pixel_format, self.width * self.height * 2)

    def run(self):
        """Thread body: launch FFmpeg and read fixed-size frames until stopped."""
        self.logger.log_to_file(
            f" 🎉 [Info] [{self.stream_type.upper()}] Starting stream: {self.uvc_cam_name}, [Config] width: {self.width}, height: {self.height}, fps: {self.fps}, pixel_format: {self.pixel_format}"
        )

        try:
            self.pipe = subprocess.Popen(
                self.ffmpeg_command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                bufsize=self.frame_size,
                creationflags=self.creation_flags
            )

            # Give FFmpeg a moment to fail fast on bad arguments/devices.
            time.sleep(0.2)
            if self.pipe.poll() is not None:
                stderr_output = self.pipe.stderr.read().decode()
                self.logger.log_to_file(
                    f" 💥 [Error] [FFmpegCaptureThread] [{self.stream_type.upper()}] FFmpeg failed to start: {stderr_output}"
                )
                return

            self._started_event.set()
            self.logger.log_to_file(
                f" ✅ [Info] [{self.stream_type.upper()}] Stream started successfully"
            )

            # BUG FIX: FFmpeg continuously writes progress lines to stderr.
            # With stderr=PIPE and no reader, the OS pipe buffer eventually
            # fills and FFmpeg blocks, stalling capture. Drain it in background.
            Thread(target=self._drain_stderr, daemon=True).start()

            while not self._stop_event.is_set():
                bytes_read = self.pipe.stdout.readinto(self.raw_view)

                if bytes_read == self.frame_size:
                    self._process_and_queue_frame()
                elif bytes_read == 0:
                    if self.pipe.poll() is not None:
                        self.logger.log_to_file(
                            f" 💥 [Error] [FFmpegCaptureThread] [{self.stream_type.upper()}] FFmpeg process terminated"
                        )
                        break
                else:
                    self.logger.log_to_file(
                        f" ⚠️ [Warning] [FFmpegCaptureThread] [{self.stream_type.upper()}] Incomplete frame: {bytes_read} / {self.frame_size} bytes"
                    )
                    time.sleep(0.001)
        except FileNotFoundError:
            self.logger.log_to_file(
                f" 💥 [Error] [FFmpegCaptureThread] [{self.stream_type.upper()}] FFmpeg not found: {self.ffmpeg_command[0]}"
            )
        except Exception as e:
            self.logger.log_to_file(
                f" 💥 [Error] [FFmpegCaptureThread] [{self.stream_type.upper()}] Exception: {e}"
            )
        finally:
            self._cleanup()

    def _drain_stderr(self):
        """Discard FFmpeg's stderr output so the pipe never fills and blocks it."""
        try:
            pipe = self.pipe
            if pipe and pipe.stderr:
                while not self._stop_event.is_set():
                    if not pipe.stderr.read(4096):
                        break  # EOF: FFmpeg exited
        except Exception:
            # The pipe is closed during _cleanup(); nothing left to drain.
            pass

    def _process_and_queue_frame(self):
        """Parse the header of the frame in raw_buffer, dedupe, and enqueue it."""
        # Decode the 28-byte vendor header.
        header_bytes = bytes(self.raw_buffer[:self.header_size])
        try:
            _fields = self.header_struct.unpack(header_bytes)
        except struct.error as e:
            self.logger.log_to_file(f" 💥 [Error] [{self.stream_type.upper()}] Header unpack error: {e}")
            return

        _keys = ['magic', 'version', 'size', 'type', 'width', 'height',
                 'sequence', 'timestamp', 'reserve']
        result = dict(zip(_keys, _fields))

        # Drop frames without the expected magic marker.
        if result['magic'] != 0xA55A:
            return

        ts = int(result['timestamp'] / 1000)  # us to ms
        frame_type = result['type']

        # Reject frames whose timestamp went backwards.
        if ts < self.last_ts:
            self.logger.log_to_file(f" ⚠️ [Warn] [{self.stream_type.upper()}] Timestamp rollback: {self.last_ts}ms -> {ts}ms")
            return

        # Frame-drop detection (before the duplicate check, needs a valid gap).
        if 0 < self.last_ts < ts:
            interval = ts - self.last_ts

            if self.learning_phase:
                self._learning_phase_update(interval)
            else:
                self._check_frame_drop(ts, self.last_ts, interval)

        # Duplicate rejection. IR left/right frames (types 4/5) legitimately
        # share a timestamp, so only identical types are treated as repeats.
        if ts == self.last_ts:
            if frame_type in [1, 2] or frame_type == self.last_type:
                self.repeat_cnt += 1
                return

        self.last_ts = ts
        self.last_type = frame_type

        # Convert the raw bytes to an image for this frame type.
        frame = self._process_frame(frame_type)
        if frame is None:
            return

        label = FFmpegCaptureThread._get_label(frame_type)
        frame_data = FrameData(
            timestamp=ts,
            frame_id=self.frame_count,
            label=label,
            data=frame
        )
        # Non-blocking put (drops the oldest entry) so capture never stalls.
        if not self.output_queue.put(frame_data, drop_old=True):
            # A failed put must not interrupt capture.
            pass
        else:
            self.frame_count += 1

        self._update_fps_stats()

    def _learning_phase_update(self, interval):
        """Collect one frame interval sample; finish learning once enough are seen."""
        # Ignore pathological gaps (>500 ms) so startup hiccups don't skew stats.
        if interval < 500:
            self.learning_samples.append(interval)

        if len(self.learning_samples) >= self.learning_sample_size:
            completion_reason = f"collected {len(self.learning_samples)} samples"
            self._complete_learning_phase(completion_reason)

    def _complete_learning_phase(self, reason):
        """Compute interval statistics from the learning samples and end learning."""
        if not self.learning_samples:
            # BUG FIX: intervals are in milliseconds; the old code assigned
            # `self.fps` (frames/second) directly. Derive the ms interval from
            # the configured fps instead, and — critically — also initialise
            # max_normal_interval, which was previously left None and made
            # _check_frame_drop raise `TypeError: '>' not supported`.
            self.avg_frame_interval = 1000.0 / self.fps if self.fps else 40.0
            self.std_frame_interval = 10
            self.max_normal_interval = self.avg_frame_interval * 3
        else:
            intervals = np.array(self.learning_samples)
            self.avg_frame_interval = np.mean(intervals)
            self.std_frame_interval = np.std(intervals)

            p95 = np.percentile(intervals, 95)
            p99 = np.percentile(intervals, 99)

            # Normal max interval: lenient max(p95, mean + 3*std), but capped
            # at 3x the mean so one outlier cannot blow up the threshold.
            self.max_normal_interval = max(p95, self.avg_frame_interval + 3 * self.std_frame_interval)
            self.max_normal_interval = min(self.max_normal_interval, self.avg_frame_interval * 3)

        self.learning_phase = False

    def _check_frame_drop(self, current_ts, last_ts, interval):
        """Estimate dropped frames from the learned interval statistics."""
        if self.avg_frame_interval is None or self.max_normal_interval is None:
            return

        if interval > self.max_normal_interval:
            # Estimate how many whole frames fit in the gap.
            estimated_dropped = round(interval / self.avg_frame_interval) - 1

            if estimated_dropped > 0:
                self.dropped_frames_count += estimated_dropped

    def _process_frame(self, frame_type):
        """Convert raw_buffer into an ndarray for the given frame type.

        Returns uint16 (H, W) for depth (type 2), uint8 RGB (H, W, 3) for
        RGB/IR (types 1/4/5), or None for unknown types / on error.
        """
        try:
            if frame_type == 2:
                # Depth: raw uint16 samples transported in the YUYV payload.
                data_view = np.frombuffer(
                    self.raw_buffer[:self.height * self.width * 2],
                    dtype=np.uint16
                ).reshape((self.height, self.width))

                # The header overwrote the start of row 0; patch it from row 1.
                if data_view.shape[1] >= self.header_size:
                    data_view[0, :self.header_size] = data_view[1, :self.header_size]

                return data_view.copy()

            elif frame_type in [1, 4, 5]:
                # RGB / IR-left / IR-right transported as NV12.
                nv12_view = np.frombuffer(
                    self.raw_buffer,
                    dtype=np.uint8,
                    count=self.height * self.width * 3 // 2
                ).reshape(self.height * 3 // 2, self.width)

                rgb_frame = cv2.cvtColor(nv12_view, cv2.COLOR_YUV2RGB_NV12)

                # Repair the header-contaminated first row.
                if rgb_frame.shape[1] >= self.header_size:
                    rgb_frame[0, :self.header_size] = rgb_frame[1, :self.header_size]

                return rgb_frame

            else:
                return None

        except Exception as e:
            self.logger.log_to_file(
                f" 💥 [Error] [{self.stream_type.upper()}] Frame processing error: {e}"
            )
            return None

    @staticmethod
    def _get_label(frame_type):
        """Map the header frame type to a stream label."""
        labels = {1: 'rgb', 2: 'depth', 4: 'ir_l', 5: 'ir_r'}
        return labels.get(frame_type, 'unknown')

    def _update_fps_stats(self):
        """Push FPS / drop-rate / repeat-rate stats every stat_interval seconds."""
        self.fps_counter += 1
        current_time = time.time()
        elapsed = current_time - self.fps_start_time

        if elapsed >= self.stat_interval:
            fps = self.fps_counter / elapsed
            self.stream_mgr.update_stats(f'{self.stream_type}_fps', fps)

            if not self.learning_phase and self.dropped_frames_count > 0:
                total_frames = self.frame_count + self.dropped_frames_count
                drop_rate = (self.dropped_frames_count / total_frames * 100) if total_frames > 0 else 0
                self.stream_mgr.update_stats(f'{self.stream_type}_drop_rate', drop_rate)

            # BUG FIX: guard against ZeroDivisionError when no frame was
            # successfully queued during the interval (frame_count == 0).
            if self.frame_count > 0:
                self.stream_mgr.update_stats(f'{self.stream_type}_repeat_rate', (self.repeat_cnt / self.frame_count) * 100)

            self.fps_counter = 0
            self.fps_start_time = current_time

    def stop(self, timeout=5):
        """Request shutdown and wait up to *timeout* seconds for the thread."""
        if not self.is_alive():
            return

        self.logger.log_to_file(
            f" 🎉 [Info] [{self.stream_type.upper()}] Stopping stream..."
        )

        self._stop_event.set()

        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(
                f" ⚠️ [Warning] [{self.stream_type.upper()}] Thread did not stop gracefully"
            )
            # Force resource cleanup even though the thread is stuck.
            self._cleanup()

    def wait_until_started(self, timeout=10):
        """Block until FFmpeg start is confirmed; returns False on timeout."""
        return self._started_event.wait(timeout=timeout)

    def _cleanup(self):
        """Terminate the FFmpeg process and close all of its pipe handles."""
        self.logger.log_to_file(
            f" 🧹 [Info] [{self.stream_type.upper()}] Cleaning up..."
        )

        try:
            if self.pipe:
                if self.pipe.poll() is None:
                    self.pipe.terminate()
                    try:
                        self.pipe.wait(timeout=3)
                    except subprocess.TimeoutExpired:
                        self.pipe.kill()
                        self.pipe.wait()

                # Close every file handle to release OS resources.
                try:
                    if self.pipe.stdout:
                        self.pipe.stdout.close()
                    if self.pipe.stderr:
                        self.pipe.stderr.close()
                    if self.pipe.stdin:
                        self.pipe.stdin.close()
                except Exception as e:
                    self.logger.log_to_file(
                        f" ⚠️ [Warning] [{self.stream_type.upper()}] Error closing pipes: {e}"
                    )

                self.pipe = None
        except Exception as e:
            self.logger.log_to_file(
                f" ⚠️ [Warning] [{self.stream_type.upper()}] Cleanup error: {e}"
            )

        self.logger.log_to_file(
            f" ✅ [Info] [{self.stream_type.upper()}] Stopped. Frames processed: {self.frame_count}"
        )


class Colorizer:
    """Maps single-channel depth images to false-color BGR images.

    A lookup table (LUT) of `lut_size` BGR entries is pre-computed from a
    sparse position->RGB color map; colorization then reduces to clamping,
    quantizing and a per-channel gather. Scratch buffers are reused between
    calls to avoid per-frame allocations.
    """

    def __init__(self,
                 color_map: Optional[Dict[float, np.ndarray]] = None,
                 lut_size: int = 256,   # 256/1024 is usually plenty; 65536 wastes cache
                 mode='jet_red2blue'):
        self.color_map = color_map if color_map else Colorizer.default_jet_map(mode=mode)
        self.lut_size = lut_size
        self.lut_bgr = None      # (lut_size, 3) uint8, BGR order
        self._tmp_f32 = None     # reusable float32 scratch image
        self._idx = None         # reusable LUT index image (uint16 or int32)
        self._build_lut()

    @staticmethod
    def default_jet_map(mode='jet_red2blue'):
        """Return one of the two built-in jet-style maps (RGB values in [0, 1])."""
        stops = [
            (0.00, (0.5, 0.0, 0.0)),
            (0.11, (1.0, 0.0, 0.0)),
            (0.35, (1.0, 1.0, 0.0)),
            (0.50, (0.0, 1.0, 0.0)),
            (0.64, (0.0, 1.0, 1.0)),
            (0.86, (0.0, 0.0, 1.0)),
            (1.00, (0.0, 0.0, 0.5)),
        ]
        if mode == 'jet_red2blue':
            return {pos: np.array(rgb) for pos, rgb in stops}
        # 'jet_blue2red' keeps the same anchor positions with the colors reversed.
        reversed_colors = [rgb for _, rgb in reversed(stops)]
        return {pos: np.array(rgb) for (pos, _), rgb in zip(stops, reversed_colors)}

    def _build_lut(self):
        """Pre-compute the (lut_size, 3) uint8 BGR lookup table.

        Anchors are sorted by their (Python float) positions first so no dict
        round-trips through float32 keys are needed during interpolation.
        """
        anchors = sorted(self.color_map.items(), key=lambda kv: kv[0])
        xs = np.array([pos for pos, _ in anchors], dtype=np.float64)
        ys = np.array([rgb for _, rgb in anchors], dtype=np.float32)  # (K, 3) RGB

        sample = np.linspace(0.0, 1.0, self.lut_size, dtype=np.float32)
        rgb = np.stack(
            [np.interp(sample, xs, ys[:, ch]) for ch in range(3)],
            axis=1
        ).astype(np.float32)

        # Flip RGB -> BGR, scale to [0, 255] and quantize directly to uint8.
        self.lut_bgr = np.clip(np.rint(rgb[:, ::-1] * 255.0), 0, 255).astype(np.uint8)

    def set_colormap(self, colormap: Dict[float, np.ndarray]):
        """Replace the color map and rebuild the LUT."""
        self.color_map = colormap
        self._build_lut()

    def _ensure_buffers(self, shape):
        """(Re)allocate the scratch buffers when the image shape or dtype needs change."""
        if self._tmp_f32 is None or self._tmp_f32.shape != shape:
            self._tmp_f32 = np.empty(shape, dtype=np.float32)
        idx_dtype = np.uint16 if self.lut_size <= 65536 else np.int32
        if self._idx is None or self._idx.shape != shape or self._idx.dtype != idx_dtype:
            self._idx = np.empty(shape, dtype=idx_dtype)

    def _resolve_range(self, depth: np.ndarray, depth_range, normalize: bool) -> Tuple[float, float]:
        """Determine the (min, max) visualization range, guarding degenerate cases."""
        if depth_range is not None:
            lo, hi = map(float, depth_range)
        elif normalize:
            # Auto-range over valid (non-zero) pixels only.
            valid = depth > 0
            if np.any(valid):
                lo = float(depth[valid].min())
                hi = float(depth[valid].max())
            else:
                lo, hi = 0.0, 1.0
        else:
            lo, hi = float(depth.min()), float(depth.max())

        if not (np.isfinite(lo) and np.isfinite(hi)):
            lo, hi = 0.0, 1.0
        if hi <= lo:
            hi = lo + 1e-6  # avoid division by zero for flat images
        return lo, hi

    def colorize(self,
                 depth_image: np.ndarray,
                 depth_range: Optional[Tuple[float, float]] = None,
                 normalize: bool = True,
                 out: Optional[np.ndarray] = None) -> np.ndarray:
        """Colorize *depth_image* into an (H, W, 3) uint8 BGR image.

        Args:
            depth_image: single-channel 2D depth array.
            depth_range: explicit (min, max) range; auto-computed when None.
            normalize: when auto-ranging, restrict min/max to pixels > 0.
            out: optional pre-allocated (H, W, 3) uint8 output buffer.
        """
        depth = np.asarray(depth_image)
        if depth.ndim != 2:
            raise ValueError("depth_image 必须是单通道 2D 数组")

        self._ensure_buffers(depth.shape)
        lo, hi = self._resolve_range(depth, depth_range, normalize)

        # Map depth values to LUT indices in-place on the reusable scratch buffer.
        work = self._tmp_f32
        depth_f = depth.astype(np.float32, copy=False)  # no copy if already float32
        np.subtract(depth_f, lo, out=work)
        np.multiply(work, (self.lut_size - 1) / (hi - lo), out=work)
        np.clip(work, 0, self.lut_size - 1, out=work)
        np.rint(work, out=work)  # round to the nearest LUT entry
        self._idx[...] = work.astype(self._idx.dtype, copy=False)

        if out is None:
            out = np.empty(depth.shape + (3,), dtype=np.uint8)
        elif out.shape != depth.shape + (3,) or out.dtype != np.uint8:
            raise ValueError("out 的形状应为 (H, W, 3) 且 dtype=uint8")

        # Per-channel gather; np.take with mode='raise' buffers its output,
        # so the strided channel views of `out` are acceptable targets.
        for ch in range(3):
            np.take(self.lut_bgr[:, ch], self._idx, out=out[..., ch])

        return out


class UtilsFile(object):
    """File-system helpers: JSON config loading, save-directory checks and PNG dumps."""

    def __init__(self):
        # Shared application logger writing to the viewer log file.
        self.logger = init_logger(RMSL_LOG_PATH)

    def read_json(self, json_path: str) -> dict:
        """Load and return a JSON config dict; log + raise on missing or invalid file."""
        if not os.path.exists(json_path):
            msg = f" 🚨 [Error] config file: {json_path} not exits!"
            self.logger.log_to_file(msg)
            raise FileNotFoundError(msg)
        try:
            with open(json_path, 'r', encoding='utf-8') as file:
                return json.load(file)
        except json.JSONDecodeError as e:
            msg = f" 🚨 [Error] Failed load config file: {json_path}, error log: {e}"
            self.logger.log_to_file(msg)
            raise KeyError(msg)

    def save_path_check(self, save_path: str):
        """Create *save_path* when it does not exist yet (logs only on creation)."""
        if os.path.exists(save_path):
            return
        os.makedirs(save_path)
        self.logger.log_to_file(f" 🎉 [Info] Success create save directory: {save_path}!")

    def save_image(self, img: np.ndarray, save_dir: str, prefix: str, timestamp: str, en_rgb2bgr: bool = False):
        """Write *img* to '<save_dir>/<prefix>_<timestamp>.png'; optional RGB→BGR swap.

        Silently does nothing when *img* is None.
        """
        if img is None:
            return
        if en_rgb2bgr:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img_path = os.path.join(save_dir, f'{prefix}_{timestamp}.png')
        cv2.imwrite(img_path, img)
        self.logger.log_to_file(f' 🎉 [Info] Downloaded {prefix} image to dir: {save_dir}')


class UtilsPointCloud(object):
    """Point-cloud utilities: disparity→depth conversion, point-cloud generation
    from depth (+RGB) images, color preparation for GLScatterPlotItem, and
    PLY export (ASCII and binary)."""

    def __init__(self):
        self.logger = init_logger(RMSL_LOG_PATH)

    def disp_to_depth(self,
                      disp: np.ndarray,
                      Q23: float,
                      Q32: float,
                      Q33: float = 0.0,
                      subpixel_value: int = 64,
                      zoom_ratio: float = 1.0,
                      out: Optional[np.ndarray] = None) -> Optional[np.ndarray]:
        """Convert a subpixel-scaled disparity map to a uint16 depth map.

        depth = Q23 * zoom_ratio * subpixel_value / (Q32 * disp + Q33 * subpixel_value)

        Args:
            disp: single-channel disparity image; 0 marks invalid pixels.
            Q23, Q32, Q33: entries of the stereo reprojection matrix Q.
            subpixel_value: fixed-point disparity scale factor.
            zoom_ratio: extra scale when stream size differs from calibration size.
            out: optional preallocated uint16 buffer with disp's shape.

        Returns:
            uint16 depth image (zeros where invalid), or None on bad input.
        """
        if disp is None:
            self.logger.log_to_file(f" 🚨 [Error] Check disp image, not initial.")
            return None

        if not isinstance(disp, np.ndarray):
            self.logger.log_to_file(f" 🚨 [Error] Check disp image input, Invalid image.")
            return None

        if out is None:
            out = np.zeros(disp.shape, dtype=np.uint16)
        else:
            if out.shape != disp.shape or out.dtype != np.uint16:
                self.logger.log_to_file(
                    f" 🚨 [Error] Output buffer shape/dtype mismatch: "
                    f"expected {disp.shape} uint16, got {out.shape} {out.dtype}"
                )
                return None
            out.fill(0)

        valid_mask = disp != 0
        disp_float = disp.astype(np.float32)

        # Denominator of the reprojection formula.
        denominator = Q32 * disp_float
        if Q33 != 0.0:
            denominator += Q33 * subpixel_value

        # Pixels with both a non-zero disparity and a non-zero denominator.
        valid_denominator = (denominator != 0) & valid_mask

        if not np.any(valid_denominator):
            return out  # no valid depth at all

        # Reuse disp_float as the depth buffer to avoid an extra allocation.
        depth_float = disp_float
        depth_float[valid_denominator] = (
                Q23 * zoom_ratio * subpixel_value / denominator[valid_denominator]
        )
        depth_float[~valid_denominator] = 0

        # Round and clamp into the uint16 range before the final cast.
        np.clip(depth_float, 0, 65535, out=depth_float)
        np.rint(depth_float, out=depth_float)
        out[...] = depth_float.astype(np.uint16)

        return out

    def generate_pointcloud(self,
                            img_depth,
                            fx, fy, cx, cy,
                            img_h, img_w,
                            min_dis, max_dis,
                            downsample_factor: int = 1,
                            convert_unit_m: bool = False,
                            en_valid_roi=False,
                            en_cen_roi=False):
        """Back-project a depth image into an Nx3 point cloud.

        Args:
            img_depth: single-channel depth image (same unit as min/max_dis).
            fx, fy, cx, cy: pinhole intrinsics of the depth camera.
            img_h, img_w: depth image size (recomputed after downsampling).
            min_dis, max_dis: depth validity range.
            downsample_factor: pixel subsampling stride (>1 reduces point count).
            convert_unit_m: when True, convert mm inputs to meters.
            en_valid_roi: restrict to an inner ROI (drops border pixels).
            en_cen_roi: with en_valid_roi, use the tighter central ROI
                (25% margin per side) instead of the default 5% margin.

        Returns:
            (pointcloud, zero_val_cnt): Nx3 array of (x, y, z) plus the number
            of zero-valued depth pixels (counted after downsampling).
        """
        if downsample_factor > 1:
            img_depth = img_depth[::downsample_factor, ::downsample_factor]
            img_h, img_w = img_depth.shape

        # Intrinsics scale with the subsampling stride.
        fx = fx / downsample_factor
        fy = fy / downsample_factor
        cx = cx / downsample_factor
        cy = cy / downsample_factor

        if convert_unit_m:
            img_depth = img_depth / 1000
            min_dis = min_dis / 1000
            max_dis = max_dis / 1000

        valid_mask = (img_depth >= min_dis) & (img_depth <= max_dis)
        zero_val_cnt = np.sum(img_depth == 0)

        if en_valid_roi:
            # Margin per side: 25% for the central ROI, otherwise 5%.
            if en_cen_roi:
                margin_w = int(0.25 * img_w)
                margin_h = int(0.25 * img_h)
            else:
                margin_w = int(0.05 * img_w)
                margin_h = int(0.05 * img_h)

            start_u, end_u = margin_w, img_w - margin_w
            start_v, end_v = margin_h, img_h - margin_h

            u, v = np.meshgrid(np.arange(start_u, end_u), np.arange(start_v, end_v))

            # Negated x/y: the viewer's axes are mirrored vs. image axes.
            x_data = - (u - cx) * img_depth[start_v: end_v, start_u: end_u] / fx
            y_data = - (v - cy) * img_depth[start_v: end_v, start_u: end_u] / fy
            z_data = img_depth[start_v: end_v, start_u: end_u]

            valid_x_data = x_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_y_data = y_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_z_data = z_data[valid_mask[start_v:end_v, start_u:end_u]]

            pointcloud = np.column_stack((valid_x_data, valid_y_data, valid_z_data))
        else:
            # Global point cloud over the full image.
            u, v = np.meshgrid(np.arange(0, img_w), np.arange(0, img_h))

            x_data = - (u - cx) * img_depth / fx
            y_data = - (v - cy) * img_depth / fy
            z_data = img_depth

            valid_x_data = x_data[valid_mask]
            valid_y_data = y_data[valid_mask]
            valid_z_data = z_data[valid_mask]

            pointcloud = np.column_stack((valid_x_data, valid_y_data, valid_z_data))

        return pointcloud, zero_val_cnt

    def generate_rgb_pointcloud(self,
                                rgb,
                                depth,
                                rgbCamParam: dict,
                                near_clip_mm: int = 100,  # near clipping plane, in mm
                                far_clip_mm: int = 3000,  # far clipping plane, in mm
                                downsample_factor: int = 1,
                                convert_unit_m: bool = False):
        """Back-project an aligned RGB+depth pair into an Nx6 colored cloud.

        Args:
            rgb: HxWxC color image aligned to the depth image.
            depth: HxW depth image in mm.
            rgbCamParam: dict with 'fx', 'fy', 'cx', 'cy' of the RGB camera.
            near_clip_mm, far_clip_mm: depth validity range in mm.
            downsample_factor: pixel subsampling stride.
            convert_unit_m: when True, output z in meters instead of mm.

        Returns:
            Nx(3+C) float32 array of (x, y, z, color...) rows.
        """
        if downsample_factor > 1:
            rgb = rgb[::downsample_factor, ::downsample_factor]
            depth = depth[::downsample_factor, ::downsample_factor]

        height, width = depth.shape

        fx = rgbCamParam['fx'] / downsample_factor
        fy = rgbCamParam['fy'] / downsample_factor
        cx = rgbCamParam['cx'] / downsample_factor
        cy = rgbCamParam['cy'] / downsample_factor

        yv, xv = np.indices((height, width))

        if convert_unit_m:
            depth = depth / 1000
            raw_near_clip = near_clip_mm / 1000
            raw_far_clip = far_clip_mm / 1000
        else:
            raw_near_clip = near_clip_mm
            raw_far_clip = far_clip_mm
        valid_mask = (depth >= raw_near_clip) & (depth <= raw_far_clip)

        z = depth[valid_mask]
        # Negated x/y: viewer axes are mirrored relative to image axes.
        x = -(xv[valid_mask] - cx) * z / fx
        y = -(yv[valid_mask] - cy) * z / fy

        colors = rgb[valid_mask]
        points = np.column_stack((x, y, z, colors))

        return points.astype(np.float32)

    def prepare_glscatter_data(self, points, colors=None, default_rgbs=(0.9, 0.9, 0.9, 1.0), assume_bgr=False):
        """Normalize point/color data for a GLScatterPlotItem.

        Args:
            points: Nx3 or Nx6 (xyz[+rgb]) array-like, float32/float64.
            colors: None, Nx3/Nx4 array, or a length-3/4 constant color.
            default_rgbs: constant RGBA used when no color source is available.
            assume_bgr: when True, colors are BGR(A) and get swapped to RGB(A).

        Returns:
            (pos, rgba): Nx3 float32 positions and Nx4 float32 colors in 0..1
            (rgba is None only when *points* itself is None).

        Raises:
            ValueError: on malformed point or color shapes.
        """
        if points is None:
            self.logger.log_to_file(f" 🚨 [Error] [Utils] Points is empty!")
            return np.zeros((0, 3), dtype=np.float32), None

        pts = np.asarray(points)
        if pts.ndim != 2 or pts.shape[1] not in (3, 6):
            # Use pts.shape (not points.shape): *points* may be a plain list
            # with no .shape attribute, which previously raised AttributeError
            # instead of the intended ValueError.
            self.logger.log_to_file(f" 🚨 [Error] [Utils] Wrong points shape: {pts.shape}, is not (N,3) or (N,6)")
            raise ValueError(f"points shape {pts.shape} is not (N,3) or (N,6)")

        # position
        pos = pts[:, :3].astype(np.float32, copy=False)

        # Color priority: explicit `colors` arg > per-point color columns > default.
        if colors is not None:
            col = np.asarray(colors)
        elif pts.shape[1] == 6:
            col = pts[:, 3:6]
        else:
            col = None

        if col is None:
            # No color source: broadcast the default constant color.
            rgba = np.empty((pos.shape[0], 4), dtype=np.float32)
            rgba[:] = np.asarray(default_rgbs, dtype=np.float32)

            return pos, rgba

        # Expand a constant color to one row per point.
        if col.ndim == 1:
            if col.size == 3:
                col = np.tile(col[None, :], (pos.shape[0], 1))
            elif col.size == 4:
                col = np.tile(col[None, :], (pos.shape[0], 1))
            else:
                raise ValueError("colors: In one dimension, the length can only be 3 or 4")
        else:
            if col.shape[0] != pos.shape[0] or col.shape[1] not in (3, 4):
                raise ValueError("colors must be Nx3 or Nx4, with N being the same as points")

        col = col.astype(np.float32, copy=False)

        # BGR -> RGB (keep the alpha channel in place when present).
        if assume_bgr:
            if col.shape[1] == 3:
                col = col[:, [2, 1, 0]]
            else:
                col = col[:, [2, 1, 0, 3]]

        # Guard against empty col before calling max()
        if col.size == 0:
            rgba = np.empty((pos.shape[0], 4), dtype=np.float32)
            rgba[:] = np.asarray(default_rgbs, dtype=np.float32)
            return pos, rgba

        # Normalize 0..255 colors into 0..1.
        if col.dtype == np.uint8 or col.max() > 1.0:
            col = col.astype(np.float32) / 255.0

        # Append an opaque alpha channel when missing.
        if col.shape[1] == 3:
            alpha = np.ones((col.shape[0], 1), dtype=np.float32)
            rgba = np.concatenate([col, alpha], axis=1)
        else:
            rgba = col

        return pos, rgba

    def save_ply_ascii_vectorized(self, point_cloud_data, filename):
        """Save an Nx3 cloud as ASCII PLY.

        The header is built line by line with no leading whitespace: PLY
        header keywords must start at column 0, which the previous indented
        triple-quoted string violated (it also prefixed the first data row
        with four spaces).
        """
        n_points = len(point_cloud_data)

        header = (
            "ply\n"
            "format ascii 1.0\n"
            f"element vertex {n_points}\n"
            "property float x\n"
            "property float y\n"
            "property float z\n"
            "end_header\n"
        )

        # Comprehension + join is far faster than per-line file writes.
        data_lines = [f"{point[0]:.6f} {point[1]:.6f} {point[2]:.6f}" for point in point_cloud_data]
        data_content = '\n'.join(data_lines)

        with open(filename, 'w') as f:
            f.write(header + data_content)

        self.logger.log_to_file(f" 🎉 [Info] Success save ascii vectorized pointcloud")

    def save_ply_binary_rgb(self, point_cloud_data, filename):
        """Save an Nx6 (x, y, z, r, g, b) cloud as binary little-endian PLY."""
        if point_cloud_data.shape[1] != 6:
            self.logger.log_to_file(f" 💥 [Error] Input data size: [Nx6](x, y, z, r, g, b)")
            raise ValueError(f" 💥 [Error] Input data size: [Nx6](x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        with open(filename, 'wb') as f:
            # Header
            header_lines = [
                "ply",
                "format binary_little_endian 1.0",
                f"element vertex {n_points}",
                "property float x",
                "property float y",
                "property float z",
                "property uchar red",
                "property uchar green",
                "property uchar blue",
                "end_header",
            ]
            header = "\n".join(header_lines) + "\n"
            f.write(header.encode('ascii'))

            # Structured array matching the declared vertex layout.
            vertices = np.zeros(n_points, dtype=[
                ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                ('r', 'u1'), ('g', 'u1'), ('b', 'u1')
            ])

            # Coordinates.
            xyz = point_cloud_data[:, :3].astype(np.float32)
            vertices['x'] = xyz[:, 0]
            vertices['y'] = xyz[:, 1]
            vertices['z'] = xyz[:, 2]

            rgb_data = point_cloud_data[:, 3:6]

            # Colors may arrive as 0..1 floats or already as 0..255 values.
            if rgb_data.max() <= 1.0:
                rgb = (rgb_data * 255).astype(np.uint8)
            else:
                rgb = rgb_data.astype(np.uint8)

            # Column order is R, G, B (input is RGB, not BGR).
            vertices['r'] = rgb[:, 0]  # Red   = column 0
            vertices['g'] = rgb[:, 1]  # Green = column 1
            vertices['b'] = rgb[:, 2]  # Blue  = column 2

            f.write(vertices.tobytes())

        self.logger.log_to_file(f" 🎉 [Info] Success save rgb-pointcloud(binary fast)")

    def save_ply_binary_fast(self, point_cloud_data, filename):
        """Save an Nx3 cloud as binary little-endian PLY (fast path)."""
        n_points = len(point_cloud_data)

        with open(filename, 'wb') as f:
            header = f"ply\nformat binary_little_endian 1.0\nelement vertex {n_points}\n"
            header += "property float x\nproperty float y\nproperty float z\nend_header\n"
            f.write(header.encode('ascii'))

            # Ensure contiguous float32 memory before dumping raw bytes.
            point_cloud_float32 = point_cloud_data.astype(np.float32, copy=False)
            if not point_cloud_float32.flags.c_contiguous:
                point_cloud_float32 = np.ascontiguousarray(point_cloud_float32)

            f.write(point_cloud_float32.tobytes())

        self.logger.log_to_file(f" 🎉 [Info] Success save pointcloud(binary fast)")

    def save_pointcloud_fast(self, point_cloud_save, filename="pointcloud.ply"):
        """Pick binary PLY for large clouds (>50k points), ASCII otherwise."""
        if len(point_cloud_save) > 50000:
            self.save_ply_binary_fast(point_cloud_save, filename)
        else:
            self.save_ply_ascii_vectorized(point_cloud_save, filename)


class CalibrationReader(object):
    """Context-manager wrapper over cv2.FileStorage for calibration YAML files."""

    def __init__(self, yaml_path: str):
        self.logger = init_logger(RMSL_LOG_PATH)
        self.fs = cv2.FileStorage(yaml_path, cv2.FILE_STORAGE_READ)
        if self.fs.isOpened():
            return
        self.logger.log_to_file(f" 🚨 [Error] Cant not open calib file: {yaml_path}")
        raise ValueError(f" 🚨 [Error] Can not open calib file: {yaml_path}")

    def read_scalar(self, key, dtype=int, default=None):
        """Read a scalar node as int/float/string; *default* when the key is absent."""
        node = self.fs.getNode(key)
        if node.empty():
            return default
        if dtype == int:
            return int(node.real())
        if dtype == float:
            return node.real()
        return node.string()

    def read_matrix(self, key, default=None):
        """Read a matrix node; *default* when the key is absent."""
        node = self.fs.getNode(key)
        return default if node.empty() else node.mat()

    def read_calibration(self, required_keys=None, optional_keys=None):
        """Read the standard calibration layout into a flat dict.

        Missing required keys raise ValueError; optional keys fall back to
        their declared (dtype, default) pairs. A dtype of None means the key
        holds a matrix.
        """
        matrix_keys = {"K_l", "K_r", "D_l", "D_r", "R", "T", "Q", "R1", "R2", "P1", "P2"}

        if required_keys is None:
            required_keys = ["l_width", "l_height", "K_l", "D_l", "Q"]

        if optional_keys is None:
            optional_keys = {
                "r_width": (int, 640),
                "r_height": (int, 352),
                "rgb_width": (int, 1280),
                "rgb_height": (int, 1080),
                "R1": (None, None),
                "R2": (None, None),
                "P1": (None, None),
                "P2": (None, None),
                "K_rgb": (None, None),
                "D_rgb": (None, None),
                "R_ir2rgb": (None, None),
                "T_ir2rgb": (None, None),
                "invPR1": (None, None)
            }

        intrinsic_info = {}
        for key in required_keys:
            reader = self.read_matrix if key in matrix_keys else self.read_scalar
            value = reader(key)
            if value is None:
                self.logger.log_to_file(f" 🚨 [Error] Core key: {key} not exists!")
                raise ValueError(f" 🚨 [Error] Core key: '{key}' not exists!")
            intrinsic_info[key] = value

        for key, (dtype, default) in optional_keys.items():
            if dtype is None:
                intrinsic_info[key] = self.read_matrix(key, default)
            else:
                intrinsic_info[key] = self.read_scalar(key, dtype, default)

        return intrinsic_info

    def close(self):
        """Release the underlying FileStorage when it is still open."""
        if self.fs.isOpened():
            self.fs.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()


class ParserCamIntrinsic:
    """Parse the stereo/RGB calibration YAML into a flat coefficient dictionary."""

    # Ordered names of the distortion coefficients stored in D.
    DISTORTION_PARAMS = ['k1', 'k2', 'p1', 'p2', 'k3', 'k4', 'k5', 'k6', 's1', 's2', 's3', 's4']

    def __init__(self, intrinsic_yaml_path: str = './config/calibration.yaml'):
        self.logger = init_logger(RMSL_LOG_PATH)
        self.intrinsic_yaml_path = intrinsic_yaml_path
        self._validate_file_path()

    def _validate_file_path(self) -> None:
        """Fail fast when the calibration YAML file is missing."""
        if os.path.exists(self.intrinsic_yaml_path):
            return
        error_msg = f" 🚨 [Error] Camera intrinsic yaml not found: {self.intrinsic_yaml_path}"
        self.logger.log_to_file(error_msg)
        raise FileNotFoundError(error_msg)

    @staticmethod
    def _parse_intrinsic_matrix(K: np.ndarray) -> Dict[str, float]:
        """Expand a 3x3 intrinsic matrix into fx/fy/cx/cy plus the raw matrix."""
        parsed = {'fx': float(K[0, 0]), 'fy': float(K[1, 1]),
                  'cx': float(K[0, 2]), 'cy': float(K[1, 2])}
        parsed['K'] = K
        return parsed

    @classmethod
    def _parse_distortion(cls, D: np.ndarray, key_suffix: str = '') -> Dict[str, Any]:
        """Name each distortion coefficient; keep the raw array under 'D<suffix>'."""
        D_flat = D.flatten()
        expected = len(cls.DISTORTION_PARAMS)
        if len(D_flat) < expected:
            raise ValueError(f"Distortion array too short: expected {expected}, got {len(D_flat)}")

        distortion = {name: float(value) for name, value in zip(cls.DISTORTION_PARAMS, D_flat)}
        distortion[f'D{key_suffix}'] = D
        return distortion

    @staticmethod
    def _parse_rotation_matrix(R_proj: np.ndarray) -> Dict[str, float]:
        """Flatten a 3x3 rotation-projection matrix into r0..r8 coefficients."""
        coeffs = {}
        for row in range(3):
            for col in range(3):
                coeffs[f'r{3 * row + col}'] = float(R_proj[row, col])
        return coeffs

    @staticmethod
    def _parse_translation_vector(T_proj: np.ndarray) -> Dict[str, float]:
        """Flatten a 3x1 translation-projection vector into t0..t2 coefficients."""
        return {f't{idx}': float(val) for idx, val in enumerate(T_proj[:3, 0])}

    def _parse_left_camera_info(self, intrinsic_info: Dict) -> Dict[str, Any]:
        """Collect left/IR camera geometry, intrinsics, distortion and rectification."""
        info = {
            'l_width': intrinsic_info['l_width'],
            'l_height': intrinsic_info['l_height'],
        }
        info['l_intrinsic'] = self._parse_intrinsic_matrix(intrinsic_info['K_l'])
        info['l_distortion'] = self._parse_distortion(intrinsic_info['D_l'], '_l')
        info['R1'] = intrinsic_info['R1']
        info['P1'] = intrinsic_info['P1']
        return info

    def _parse_disparity_info(self, intrinsic_info: Dict) -> Dict[str, float]:
        """Pick the Q-matrix entries needed for disparity→depth conversion."""
        Q = intrinsic_info['Q']
        return {'q23': Q[2, 3], 'q32': Q[3, 2], 'q33': Q[3, 3]}

    def _parse_rgb_camera_info(self, intrinsic_info: Dict) -> Dict[str, Any]:
        """Build the RGB camera section; dual-RGB modules carry no RGB intrinsics."""
        width = intrinsic_info['rgb_width']
        height = intrinsic_info['rgb_height']

        if intrinsic_info['K_rgb'] is None:
            # Dual-RGB module: no separate RGB calibration is available.
            return {
                'rgb_width': width,
                'rgb_height': height,
                'rgb_intrinsic': None,
                'rgb_distortion': None,
                'r_coeff': None,
                't_coeff': None,
                'category': '2rgb',
            }

        # RGBD module: projection that maps IR coordinates onto the RGB plane.
        R_proj = (intrinsic_info['K_rgb'] @ intrinsic_info['R_ir2rgb'] @ intrinsic_info['invPR1'])
        T_proj = intrinsic_info['K_rgb'] @ intrinsic_info['T_ir2rgb']

        return {
            'rgb_width': width,
            'rgb_height': height,
            'rgb_intrinsic': self._parse_intrinsic_matrix(intrinsic_info['K_rgb']),
            'rgb_distortion': self._parse_distortion(intrinsic_info['D_rgb']),
            'r_coeff': self._parse_rotation_matrix(R_proj),
            't_coeff': self._parse_translation_vector(T_proj),
            'category': 'rgbd',
        }

    def get_all_camera_intrinsic(self) -> Dict[str, Any]:
        """Read the YAML and return the merged calibration dictionary."""
        with CalibrationReader(self.intrinsic_yaml_path) as reader:
            raw_info = reader.read_calibration()

        return {
            **self._parse_left_camera_info(raw_info),
            **self._parse_disparity_info(raw_info),
            **self._parse_rgb_camera_info(raw_info),
        }


class RMSLAlign(object):
    """Splats a depth map from the IR/depth camera into the RGB camera's pixel
    grid using precomputed per-pixel projection coefficients (see create_lut).

    The destination buffer is padded by extend_w_pixels / extend_h_pixels on
    each side, so points projecting slightly outside the target image are
    absorbed by the padding instead of needing per-point clipping; only the
    central dst_h x dst_w view is returned to callers.
    """

    def __init__(self,
                 src_w: int, src_h: int,
                 dst_w: int, dst_h: int,
                 extend_w_pixels=256,
                 extend_h_pixels=256,
                 rotate_angle=0):
        # Source (depth) and destination (RGB) image sizes.
        self.src_w = src_w
        self.src_h = src_h
        self.dst_w = dst_w
        self.dst_h = dst_h
        # Rotation applied to projected coordinates: 0 / 90 / 180 / 270 degrees.
        self.rotate_angle = rotate_angle
        self.extend_w_pixel = extend_w_pixels
        self.extend_h_pixel = extend_h_pixels

        # Padded buffer size: destination plus padding on both sides.
        self.extend_buf_w = dst_w + 2 * self.extend_w_pixel
        self.extend_buf_h = dst_h + 2 * self.extend_h_pixel

        # Per-source-pixel projection coefficients (x, y, z), filled by create_lut().
        self.lut = np.zeros((self.src_h, self.src_w, 3), dtype=np.float32)
        # Reusable padded uint16 output buffer.
        self.dp_extend = np.zeros((self.extend_buf_h, self.extend_buf_w), dtype=np.uint16)

        self.logger = init_logger(RMSL_LOG_PATH)

    def create_lut(self, r_coeff: dict):
        """Precompute, for every source pixel (col, row), the rotation part of the
        projection: x/y/z = r-row coefficients applied to (col, row, 1).

        Args:
            r_coeff: dict with keys 'r0'..'r8' (flattened 3x3 projection matrix,
                as produced by ParserCamIntrinsic._parse_rotation_matrix).
        """
        if r_coeff is None or len(r_coeff) == 0:
            self.logger.log_to_file(f" 🚨 [Error] Check calib_data file(failed get r and t coeff)")
            return

        cols, rows = np.meshgrid(np.arange(self.src_w), np.arange(self.src_h))
        x = r_coeff['r0'] * cols + r_coeff['r1'] * rows + r_coeff['r2']
        y = r_coeff['r3'] * cols + r_coeff['r4'] * rows + r_coeff['r5']
        z = r_coeff['r6'] * cols + r_coeff['r7'] * rows + r_coeff['r8']

        self.lut[:, :, 0] = x
        self.lut[:, :, 1] = y
        self.lut[:, :, 2] = z

    def align_dp_to_rgb(self, dp_img, t_coeff: dict):
        """Project every valid (non-zero) depth pixel into the RGB frame.

        Collisions (multiple depth pixels landing on the same RGB pixel) are
        resolved by keeping the smallest z value (min-Z buffer).

        Args:
            dp_img: source depth image of shape (src_h, src_w); 0 = invalid.
            t_coeff: dict with keys 't0'..'t2' (translation part of the projection).

        Returns:
            The central dst_h x dst_w uint16 view of the padded buffer
            (a view into self.dp_extend — not a copy).
        """
        self.dp_extend.fill(0)

        # Z-buffer, float32, initialized to +inf so any real depth wins.
        H, W = self.extend_buf_h, self.extend_buf_w
        zbuf = np.full((H, W), np.inf, dtype=np.float32)

        valid_mask = dp_img > 0
        if not np.any(valid_mask):
            # No valid depth at all: return the (all-zero) central view.
            return self.dp_extend[
                self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
            ]

        valid_coords = np.where(valid_mask)
        valid_dp_vals = dp_img[valid_coords].astype(np.float32)
        valid_lut = self.lut[valid_coords].astype(np.float32)

        # Full projection: depth * (rotation coefficients) + translation.
        uv_color = np.column_stack([
            valid_dp_vals * valid_lut[:, 0] + t_coeff['t0'],
            valid_dp_vals * valid_lut[:, 1] + t_coeff['t1'],
            valid_dp_vals * valid_lut[:, 2] + t_coeff['t2']
        ]).astype(np.float32)

        # Drop points at/behind the image plane (z too small to divide by).
        z_valid = uv_color[:, 2] >= 1e-6
        if not np.any(z_valid):
            return self.dp_extend[
                self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
            ]

        uv_color = uv_color[z_valid]

        # Perspective divide, rounded to the nearest pixel (+0.5 then truncate).
        inv_z = 1.0 / uv_color[:, 2]
        new_coords = np.column_stack([
            (uv_color[:, 0] * inv_z + 0.5).astype(np.int32),
            (uv_color[:, 1] * inv_z + 0.5).astype(np.int32)
        ])

        # Apply the configured output rotation to the pixel coordinates.
        if self.rotate_angle == 90:
            new_coords = np.column_stack([new_coords[:, 1], self.dst_h - 1 - new_coords[:, 0]])
        elif self.rotate_angle == 180:
            new_coords = np.column_stack([
                self.dst_w - 1 - new_coords[:, 0],
                self.dst_h - 1 - new_coords[:, 1]
            ])
        elif self.rotate_angle == 270:
            new_coords = np.column_stack([self.dst_w - 1 - new_coords[:, 1], new_coords[:, 0]])

        # Keep only points that land inside the (unpadded) destination image.
        bounds_valid = (
                (new_coords[:, 0] >= 0) & (new_coords[:, 0] < self.dst_w) &
                (new_coords[:, 1] >= 0) & (new_coords[:, 1] < self.dst_h)
        )

        if not np.any(bounds_valid):
            return self.dp_extend[
                self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
            ]

        new_coords = new_coords[bounds_valid]
        pixel_values = uv_color[bounds_valid, 2].astype(np.float32)
        # Shift into the padded buffer's coordinate frame before splatting.
        final_coords = new_coords + np.array([self.extend_w_pixel, self.extend_h_pixel], dtype=np.int32)
        self._splat_min_2x2(zbuf, final_coords, pixel_values)

        # Turn inf (untouched pixels) into 0 and copy back as uint16.
        out = np.where(np.isfinite(zbuf), np.clip(zbuf, 0, 65535), 0).astype(np.uint16)
        self.dp_extend[:, :] = out

        return self.dp_extend[
            self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
            self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
        ]

    def _splat_min_2x2(self, zbuf, coords, values):
        """Write each value into a 2x2 pixel footprint of the z-buffer, keeping
        the minimum on collisions (fills small holes left by forward mapping).

        Args:
            zbuf: float32 z-buffer of the padded size, +inf where untouched.
            coords: Nx2 int array of (x, y) padded-buffer coordinates.
            values: N float32 depth values to splat.
        """
        x_coords = coords[:, 0]
        y_coords = coords[:, 1]

        # The footprint's growth direction follows the output rotation.
        if self.rotate_angle == 0:
            offsets = [(0, 0), (1, 0), (0, 1), (1, 1)]
        elif self.rotate_angle == 90:
            offsets = [(0, 0), (1, 0), (0, -1), (1, -1)]
        elif self.rotate_angle == 180:
            offsets = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
        else:  # 270
            offsets = [(0, 0), (-1, 0), (0, 1), (-1, 1)]

        H, W = zbuf.shape
        zflat = zbuf.ravel()
        for dx, dy in offsets:
            fx = x_coords + dx
            fy = y_coords + dy
            valid = (fx >= 0) & (fx < W) & (fy >= 0) & (fy < H)
            if not np.any(valid):
                continue
            idx = (fy[valid] * W + fx[valid]).astype(np.int64)
            vals = values[valid]
            # Core step: min-aggregation over (possibly duplicated) flat indices.
            np.minimum.at(zflat, idx, vals)


class ProcessRgbThread(Thread):
    """Worker thread that consumes raw RGB frames, optionally rectifies them,
    feeds the RGB/depth matcher and the UI, and tracks its processing FPS."""

    def __init__(self, stream_mgr: StreamManager, calib_info: dict, config_info: dict, en_undistort: bool = False):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self._stop_event = Event()

        self.en_undistort = en_undistort
        self.calib_info = calib_info
        self.config_info = config_info
        self.module_category = self.calib_info.get('category', 'rgbd')

        # Dimensions of the incoming RGB stream.
        rgb_cfg = self.stream_mgr.stream_configs['rgb']
        self.rgb_w = rgb_cfg.width
        self.rgb_h = rgb_cfg.height

        # Build the undistortion maps up front when rectification is enabled.
        if self.en_undistort:
            self._initial_rectify()

        # Sliding window of frame timestamps for FPS estimation.
        self.fps_counter = deque(maxlen=50)

    def _initial_rectify(self):
        """Prepare cv2 remap tables for the configured module category."""
        if self.module_category == 'rgbd':
            size = (self.calib_info['rgb_width'], self.calib_info['rgb_height'])
            K = self.calib_info['rgb_intrinsic']['K']
            D = self.calib_info['rgb_distortion']['D']
            # alpha=0 keeps only valid pixels in the rectified image.
            new_cam_matrix, _ = cv2.getOptimalNewCameraMatrix(K, D, size, alpha=0, newImgSize=size)
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(K, D, None, new_cam_matrix, size, cv2.CV_32FC1)
        elif self.module_category == '2rgb':
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(
                self.calib_info['l_intrinsic']['K_l'],
                self.calib_info['l_distortion']['D_l'],
                self.calib_info['R1'],
                self.calib_info['P1'],
                (self.calib_info['l_width'], self.calib_info['l_height']),
                cv2.CV_32FC1
            )
        else:
            msg = f" 🚨 [Error] Rectify failed initialization, not support module category: {self.module_category}."
            self.logger.log_to_file(msg)
            raise ValueError(msg)

        self.logger.log_to_file(f" 🎉 [Info] Successfully initialized rgb rectify map params.")

    def _rectify_rgb(self, ori_img: np.ndarray) -> np.ndarray:
        """Undistort one RGB frame; for dual-RGB modules a double-width frame is
        first 2x-subsampled and vertically cropped to the calibrated geometry."""
        img_rgb = ori_img
        if self.module_category == '2rgb':
            calib_w = self.calib_info['l_width']
            calib_h = self.calib_info['l_height']
            if self.rgb_w == calib_w * 2:
                img_rgb = ori_img[::2, ::2]
                half_h = int(self.rgb_h / 2)
                crop_total = int(self.rgb_h / 2 - calib_h)
                crop_top = crop_total // 2
                # Split the surplus rows between top and bottom of the frame.
                img_rgb = img_rgb[crop_top: half_h - (crop_total - crop_top), :]
        return cv2.remap(img_rgb, self.mapx, self.mapy, cv2.INTER_LINEAR)

    def run(self):
        """Main loop: pull, rectify and dispatch RGB frames until stopped."""
        while not self._stop_event.is_set():
            if not self.stream_mgr.stream_started:
                time.sleep(0.02)
                continue

            rgb_data = self.stream_mgr.raw_rgb_queue.get_latest(timeout=0.05)
            if rgb_data is None:
                time.sleep(0.02)
                continue

            if self.en_undistort:
                rgb_frame = self._rectify_rgb(ori_img=rgb_data.data)
            else:
                rgb_frame = rgb_data.data

            # Hand the frame to the RGB/depth matcher, then to the UI.
            self.stream_mgr.matcher.add_rgb_data(rgb_frame, rgb_data.timestamp)
            self.stream_mgr.update_ui_data(rgb=rgb_frame)

            # Rolling FPS estimate over the most recent frames.
            self.fps_counter.append(time.time())
            if len(self.fps_counter) >= 25:
                span = self.fps_counter[-1] - self.fps_counter[0]
                with self.stream_mgr.stats_lock:
                    self.stream_mgr.stats['process_rgb_fps'] = len(self.fps_counter) / span
        self.logger.log_to_file(f" 🎉 [Info] [ProcessRgbThread] exit.")

    def stop(self, timeout=5):
        """Signal the loop to stop and wait up to *timeout* seconds for exit."""
        self.logger.log_to_file(
            f" 🎉 [Info] [ProcessRgbThread] Stopping stream..."
        )
        self._stop_event.set()

        # Give the run loop a chance to exit cleanly.
        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(
                f" ⚠️ [Warning] [ProcessRgbThread] Thread did not stop gracefully"
            )

class ProcessDispThread(Thread):
    """Worker thread that converts raw disparity frames to pseudo-depth,
    colorizes them, feeds the RGB/depth matcher, and updates the UI.

    Args:
        stream_mgr: shared StreamManager (queues, matcher, UI hooks, stats).
        calib_info: parsed calibration dict (q23/q32/q33, sizes, category).
        config_info: viewer config (debug flag, depth range, colormap mode).
    """
    def __init__(self, stream_mgr: StreamManager, calib_info: dict, config_info: dict):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self._stop_event = Event()

        # get size: live stream resolution vs. the calibration resolution
        self.disp_w = self.stream_mgr.stream_configs['depth'].width
        self.disp_h = self.stream_mgr.stream_configs['depth'].height
        self.calib_disp_w = calib_info['l_width']
        self.calib_disp_h = calib_info['l_height']

        scale = self.disp_w / self.calib_disp_w

        # Calibration info
        self.calib_info = calib_info
        self.q23 = self.calib_info['q23']
        if scale != 1.0:
            # Rescale q23 when the live resolution differs from calibration.
            self.q23 = self.q23 / scale

        self.q32 = self.calib_info['q32']
        self.q33 = self.calib_info['q33']
        self.module_category = self.calib_info.get('category', 'rgbd')

        # Config info
        self.en_debug = config_info['en_debug']
        self.min_depth_dis = config_info.get('min_distance_mm', 100)  # mm
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)  # mm
        self.colorizer_mode = config_info.get('colorize_mode', 'jet_red2blue')

        # helper APIs
        self.utils_pc = UtilsPointCloud()
        self.colorizer = Colorizer(lut_size=256, mode=self.colorizer_mode)

        # fps tracking
        self.fps_counter = deque(maxlen=50)

        # Pre-allocate processing buffers to avoid per-frame allocations.
        self._preallocate_buffers()

        # flag: becomes truthy after the first debug frame, so a disparity
        # diff is only computed once a previous frame exists
        self.last_disp = None

    def _preallocate_buffers(self):
        """Pre-allocate all processing buffers at the stream resolution."""
        shape = (self.disp_h, self.disp_w)

        # Pseudo-depth output buffer (mm).
        self.depth_buffer = np.zeros(shape, dtype=np.uint16)

        # Colorized depth image buffer.
        self.depth_color_buffer = np.zeros((*shape, 3), dtype=np.uint8)

        # Frame-difference buffers (debug mode only).
        if self.en_debug:
            self.disp_diff_buffer = np.zeros(shape, dtype=np.int16)  # int16: diffs can be negative
            self.last_disp_buffer = np.zeros(shape, dtype=np.uint16)
            self.disp_diff_color_buffer = np.zeros((*shape, 3), dtype=np.uint8)
        else:
            self.disp_diff_buffer = None
            self.last_disp_buffer = None

    def run(self):
        """Worker loop: disparity -> pseudo-depth -> colorize -> matcher/UI."""
        while not self._stop_event.is_set():
            if not self.stream_mgr.stream_started:
                time.sleep(0.02)
                continue

            disp_data = self.stream_mgr.raw_depth_queue.get_latest(timeout=0.05)
            if disp_data is None:
                time.sleep(0.02)
                continue

            disp_frame = disp_data.data
            ts = disp_data.timestamp

            # Convert disparity to pseudo-depth.
            depth = self.utils_pc.disp_to_depth(
                disp_frame,
                self.q23, self.q32, self.q33,
                subpixel_value=64,
                zoom_ratio=1.0,
                out=self.depth_buffer  # write into the pre-allocated buffer
            )
            if depth is None:
                continue

            _ = self.colorizer.colorize(depth, depth_range=(self.min_depth_dis, self.max_depth_dis), out=self.depth_color_buffer)

            # add to match queue; .copy() detaches the matcher's data from
            # the reused buffers overwritten on the next frame
            self.stream_mgr.matcher.add_depth_data(disp_frame, self.depth_buffer.copy(), ts, self.depth_color_buffer.copy())

            # update ui show
            self.stream_mgr.update_ui_data(depth=self.depth_buffer, depth_color=self.depth_color_buffer)

            # Debug mode: frame-to-frame disparity diff in the pre-allocated
            # buffers. NOTE(review): uint16 - uint16 wraps before the unsafe
            # cast to int16, so large negative diffs alias — presumably
            # acceptable for a debug visualization; confirm if exact diffs
            # are needed.
            if self.en_debug:
                if self.last_disp is not None:
                    # numpy's out= avoids a temporary array
                    np.subtract(disp_frame, self.last_disp_buffer,
                                out=self.disp_diff_buffer,
                                casting='unsafe')
                    _ = self.colorizer.colorize(self.disp_diff_buffer, depth_range=(5, 500), out=self.disp_diff_color_buffer)
                    self.stream_mgr.update_ui_data(disp_diff=self.disp_diff_color_buffer)

                # Keep the current frame for the next diff.
                np.copyto(self.last_disp_buffer, disp_frame)
                self.last_disp = True  # flag: a previous frame now exists

            # Rolling FPS estimate for the disparity stage.
            self.fps_counter.append(time.time())
            if len(self.fps_counter) >= 25:
                fps = len(self.fps_counter) / (self.fps_counter[-1] - self.fps_counter[0])
                with self.stream_mgr.stats_lock:
                    self.stream_mgr.stats['process_depth_fps'] = fps
        self.logger.log_to_file(f" 🎉 [Info] [ProcessDispThread] exit.")

    def stop(self, timeout=5):
        """Signal the worker loop to exit and wait up to *timeout* seconds."""
        self.logger.log_to_file(
            f" 🎉 [Info] [ProcessDispThread] Stopping stream..."
        )
        self._stop_event.set()

        # Wait for the thread to finish.
        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(
                f" ⚠️ [Warning] [ProcessDispThread] Thread did not stop gracefully"
            )


class ProcessPointCloudThread(Thread):
    """Worker thread that builds point clouds from matched RGB/depth pairs.

    A downsampled cloud is generated continuously for the 3D preview; a
    full-resolution cloud is generated once on demand (see
    ``en_full_pointcloud_calculate``) and stashed in ``save_data`` for the
    save thread.

    Args:
        stream_mgr: shared StreamManager (matcher, UI hooks, save buffer).
        calib_info: parsed calibration dict (intrinsics, sizes, extrinsics).
        config_info: viewer config (depth range, RGB point-cloud flag).
    """
    def __init__(self, stream_mgr: StreamManager, calib_info: dict, config_info: dict):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self._stop_event = Event()

        self.calib_info = calib_info
        self.utils_pc = UtilsPointCloud()
        # One-shot flag: when True the next iteration also produces a
        # full-resolution cloud for saving.
        self.en_full_pc = False

        self.min_depth_dis = config_info.get('min_distance_mm', 100)  # mm
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)  # mm
        self.save_rgb_pointcloud = config_info.get('save_rgb_pointcloud', 0)

        # calib file, image size
        self.calib_rgb_w = self.calib_info.get('rgb_width', 1280)
        self.calib_rgb_h = self.calib_info.get('rgb_height', 1080)
        self.calib_disp_w = self.calib_info.get('l_width', 640)
        self.calib_disp_h = self.calib_info.get('l_height', 352)

        # uvc enable config (live stream resolutions)
        self.rgb_w = self.stream_mgr.stream_configs['rgb'].width
        self.rgb_h = self.stream_mgr.stream_configs['rgb'].height
        self.disp_w = self.stream_mgr.stream_configs['depth'].width
        self.disp_h = self.stream_mgr.stream_configs['depth'].height

        scale_rgb = self.rgb_w / self.calib_rgb_w
        scale_disp = self.disp_w / self.calib_disp_w

        # Left-camera intrinsics, rescaled to the live stream resolution.
        self.fx = self.calib_info['l_intrinsic']['fx'] * scale_disp
        self.fy = self.calib_info['l_intrinsic']['fy'] * scale_disp
        self.cx = self.calib_info['l_intrinsic']['cx'] * scale_disp
        self.cy = self.calib_info['l_intrinsic']['cy'] * scale_disp

        # .get with 'rgbd' default for consistency with ProcessDispThread
        # (a missing 'category' key must not raise here).
        self.module_category = self.calib_info.get('category', 'rgbd')

        if self.save_rgb_pointcloud == 1 and self.module_category == 'rgbd':
            # RGB-D modules need a depth-to-RGB aligner built from the
            # rescaled rotation/translation coefficients.
            self.r_coeff = {k: v * scale_rgb for k, v in self.calib_info['r_coeff'].items()}
            self.t_coeff = {k: v * scale_rgb for k, v in self.calib_info['t_coeff'].items()}

            self.aligner = RMSLAlign(src_w=self.disp_w,
                                     src_h=self.disp_h,
                                     dst_w=self.rgb_w,
                                     dst_h=self.rgb_h,
                                     extend_w_pixels=256,
                                     extend_h_pixels=256,
                                     rotate_angle=0)
            self.aligner.create_lut(self.r_coeff)

        # fps tracking
        self.fps_counter = deque(maxlen=50)

    def en_full_pointcloud_calculate(self, enabled: bool):
        """Request (or cancel) a one-shot full-resolution cloud computation."""
        self.en_full_pc = enabled

    def _compute_pointcloud(self, rgb, depth, factor):
        """Build one point cloud from a matched RGB/depth pair.

        Args:
            rgb: matched RGB image.
            depth: matched depth image (mm).
            factor: spatial downsample factor (1 = full resolution).

        Returns:
            The generated point cloud (colored when save_rgb_pointcloud == 1).
        """
        if self.save_rgb_pointcloud == 1:
            if self.module_category == 'rgbd':
                # RGB-D module: reproject depth into the RGB frame first.
                align_depth_img = self.aligner.align_dp_to_rgb(depth, self.t_coeff)
                return self.utils_pc.generate_rgb_pointcloud(rgb,
                                                             align_depth_img,
                                                             self.calib_info['rgb_intrinsic'],
                                                             near_clip_mm=self.min_depth_dis,
                                                             far_clip_mm=self.max_depth_dis,
                                                             downsample_factor=factor,
                                                             convert_unit_m=True)
            # Dual-RGB module: depth already lives in the left-RGB frame.
            return self.utils_pc.generate_rgb_pointcloud(rgb,
                                                         depth,
                                                         self.calib_info['l_intrinsic'],
                                                         near_clip_mm=self.min_depth_dis,
                                                         far_clip_mm=self.max_depth_dis,
                                                         downsample_factor=factor,
                                                         convert_unit_m=True)
        # Geometry-only cloud from the left-camera intrinsics.
        pointcloud, _ = self.utils_pc.generate_pointcloud(depth,
                                                          self.fx, self.fy, self.cx, self.cy,
                                                          depth.shape[0],
                                                          depth.shape[1],
                                                          self.min_depth_dis,
                                                          self.max_depth_dis,
                                                          downsample_factor=factor,
                                                          convert_unit_m=True,
                                                          en_valid_roi=False,
                                                          en_cen_roi=False)
        return pointcloud

    def run(self):
        """Worker loop: match frames, build preview (and on-demand full) clouds."""
        while not self._stop_event.is_set():
            if not self.stream_mgr.stream_started:
                time.sleep(0.02)
                continue

            # Point clouds are only needed while the 3D view is active.
            if not self.stream_mgr.mode_3d:
                time.sleep(0.1)
                continue

            matched_rgb_data, matched_depth_data = self.stream_mgr.matcher.find_best_match()
            if matched_rgb_data is None or matched_depth_data is None:
                time.sleep(0.05)
                continue

            matched_rgb = matched_rgb_data['img']
            ts_rgb = matched_rgb_data['timestamp']
            matched_depth = matched_depth_data['depth']
            matched_disp = matched_depth_data['disp']
            matched_depth_color = matched_depth_data['pse_color']
            ts_depth = matched_depth_data['timestamp']

            # Heavier downsampling for high-resolution streams keeps the
            # preview interactive.
            factor = 8 if self.rgb_w > 640 else 1

            pointcloud = self._compute_pointcloud(matched_rgb, matched_depth, factor)

            # update ui show
            self.stream_mgr.update_ui_data(pointcloud=pointcloud)

            # Stash the matched frame set for the save thread.
            self.stream_mgr.save_data.timestamp = ts_depth
            self.stream_mgr.save_data.depth = matched_depth
            self.stream_mgr.save_data.disp = matched_disp
            self.stream_mgr.save_data.rgb = matched_rgb
            self.stream_mgr.save_data.depth_color = matched_depth_color
            self.stream_mgr.save_data.time_diff = ts_depth - ts_rgb

            if self.en_full_pc:
                # On-demand full-resolution cloud (factor=1) for saving.
                self.stream_mgr.save_data.pointcloud = self._compute_pointcloud(matched_rgb, matched_depth, 1)
                # One-shot: reset until requested again.
                self.en_full_pc = False

            # Rolling FPS estimate for the point-cloud stage.
            self.fps_counter.append(time.time())
            if len(self.fps_counter) >= 25:
                fps = len(self.fps_counter) / (self.fps_counter[-1] - self.fps_counter[0])
                with self.stream_mgr.stats_lock:
                    self.stream_mgr.stats['process_pointcloud_fps'] = fps
        self.logger.log_to_file(f" 🎉 [Info] [ProcessPointCloudThread] exit.")

    def stop(self, timeout=5):
        """Signal the worker loop to exit and wait up to *timeout* seconds."""
        self.logger.log_to_file(
            f" 🎉 [Info] [ProcessPointCloudThread] Stopping stream..."
        )
        self._stop_event.set()

        # Wait for the thread to finish.
        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(
                f" ⚠️ [Warning] [ProcessPointCloudThread] Thread did not stop gracefully"
            )

class ProcessIRThread(Thread):
    """Worker thread that pairs left/right IR frames and publishes them
    to the UI (debug mode only)."""

    def __init__(self, stream_mgr: StreamManager):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self._stop_event = Event()

        # Rolling window of timestamps for the FPS estimate.
        self.fps_counter = deque(maxlen=50)

    def run(self):
        """Consume IR frames in pairs, order them left/right, push to the UI."""
        while not self._stop_event.is_set():
            if not self.stream_mgr.stream_started:
                time.sleep(0.02)
                continue

            # IR preview is only shown in debug mode.
            if not self.stream_mgr.debug_mode:
                time.sleep(0.02)
                continue

            first = self.stream_mgr.raw_ir_queue.get(timeout=0.05)
            second = self.stream_mgr.raw_ir_queue.get(timeout=0.05)
            if first is None or second is None:
                time.sleep(0.02)
                continue

            # The queue interleaves left/right frames; swap when the pair
            # arrived in reversed order.
            left, right = first.data, second.data
            if first.label == 'ir_r' and second.label == 'ir_l':
                left, right = right, left

            self.stream_mgr.update_ui_data(ir_left=left, ir_right=right)

            self.stream_mgr.save_data.ir_left = left
            self.stream_mgr.save_data.ir_right = right

            # Rolling FPS estimate for the IR stage.
            self.fps_counter.append(time.time())
            count = len(self.fps_counter)
            if count >= 25:
                elapsed = self.fps_counter[-1] - self.fps_counter[0]
                with self.stream_mgr.stats_lock:
                    self.stream_mgr.stats['process_ir_fps'] = count / elapsed
        self.logger.log_to_file(f" 🎉 [Info] [ProcessIRThread] exit.")

    def stop(self, timeout=5):
        """Signal the worker loop to exit and wait up to *timeout* seconds."""
        self.logger.log_to_file(f" 🎉 [Info] [ProcessIRThread] Stopping stream...")
        self._stop_event.set()

        # Block until run() drains or the grace period elapses.
        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(f" ⚠️ [Warning] [ProcessIRThread] Thread did not stop gracefully")

class SaveDataThread(Thread):
    """Worker thread that writes the current matched frame set to disk.

    Saving is one-shot: ``set_save_flag(True)`` triggers a single dump of
    RGB/depth/disparity/colorized-depth images (plus IR in debug mode) and
    the point cloud, after which the flag auto-resets.

    NOTE(review): ``stream_mgr.save_data`` is read here while producer
    threads write it without an explicit lock — presumably acceptable for
    snapshot use; confirm if exact frame consistency is required.
    """
    def __init__(self, stream_mgr: StreamManager, save_dir: str, config_info: dict):
        super().__init__(daemon=True)
        self.logger = init_logger(RMSL_LOG_PATH)
        self.stream_mgr = stream_mgr
        self._stop_event = Event()

        self.utils = UtilsFile()
        self.utils_pc = UtilsPointCloud()

        # Ensure the output directory exists before the first save.
        self.save_dir = save_dir
        self.utils.save_path_check(self.save_dir)
        # One-shot save trigger (see set_save_flag); run() clears it.
        self.en_save = False
        self.save_rgb_pointcloud = config_info.get('save_rgb_pointcloud', 0)

    def set_save_flag(self, en_save: bool):
        # Arm (or disarm) a single save operation.
        self.en_save = en_save

    @staticmethod
    def get_timestamp():
        """Return the current local time formatted for filenames."""
        formatted_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        return formatted_time

    def run(self):
        """Wait for the save flag, then write all available frame data once."""
        while not self._stop_event.is_set():
            if not self.stream_mgr.stream_started:
                time.sleep(0.02)
                continue

            if not self.en_save:
                time.sleep(0.1)
                continue

            # all_data aliases stream_mgr.save_data (same object), so the
            # fallback assignments below are visible through it too.
            all_data = self.stream_mgr.save_data
            if all_data.rgb is None or all_data.depth is None:
                # Nothing cached yet: pull a matched RGB/depth pair directly.
                matched_rgb_data, matched_depth_data = self.stream_mgr.matcher.find_best_match()
                if matched_rgb_data is None or matched_depth_data is None:
                    time.sleep(0.05)
                    continue

                self.stream_mgr.save_data.timestamp = matched_depth_data['timestamp']
                self.stream_mgr.save_data.depth = matched_depth_data['depth']
                self.stream_mgr.save_data.disp = matched_depth_data['disp']
                self.stream_mgr.save_data.rgb = matched_rgb_data['img']
                self.stream_mgr.save_data.depth_color = matched_depth_data['pse_color']
                self.stream_mgr.save_data.time_diff = matched_depth_data['timestamp'] - matched_rgb_data['timestamp']

            # One shared timestamp so all files of this snapshot correlate.
            cur_ts = SaveDataThread.get_timestamp()

            if all_data.rgb is not None:
                self.utils.save_image(all_data.rgb, self.save_dir, prefix='rgb', timestamp=cur_ts, en_rgb2bgr=True)

            if all_data.depth is not None:
                self.utils.save_image(all_data.depth, self.save_dir, prefix='depth', timestamp=cur_ts, en_rgb2bgr=False)

            if all_data.disp is not None:
                self.utils.save_image(all_data.disp, self.save_dir, prefix='disp', timestamp=cur_ts, en_rgb2bgr=False)

            if all_data.depth_color is not None:
                self.utils.save_image(all_data.depth_color, self.save_dir, prefix='depth_color', timestamp=cur_ts, en_rgb2bgr=True)

            if self.stream_mgr.debug_mode:
                # save ir image (debug mode only)
                if all_data.ir_left is not None:
                    self.utils.save_image(all_data.ir_left, self.save_dir, prefix='ir_left', timestamp=cur_ts, en_rgb2bgr=True)

                if all_data.ir_right is not None:
                    self.utils.save_image(all_data.ir_right, self.save_dir, prefix='ir_right', timestamp=cur_ts, en_rgb2bgr=True)

            # save pointcloud (colored PLY when save_rgb_pointcloud == 1)
            pointcloud_name = f'pointcloud_{cur_ts}.ply'
            pointcloud_path = os.path.join(self.save_dir, pointcloud_name)

            if all_data.pointcloud is not None:
                if self.save_rgb_pointcloud == 1:
                    self.utils_pc.save_ply_binary_rgb(all_data.pointcloud, pointcloud_path)
                else:
                    self.utils_pc.save_pointcloud_fast(all_data.pointcloud, pointcloud_path)

            # One-shot complete; disarm until the next request.
            self.en_save = False
        self.logger.log_to_file(f" 🎉 [Info] [SaveDataThread] exit.")

    def stop(self, timeout=5):
        """Signal the worker loop to exit and wait up to *timeout* seconds."""
        self.logger.log_to_file(
            f" 🎉 [Info] [SaveDataThread] Stopping stream..."
        )
        self._stop_event.set()

        # Wait for the thread to finish.
        self.join(timeout=timeout)

        if self.is_alive():
            self.logger.log_to_file(
                f" ⚠️ [Warning] [SaveDataThread] Thread did not stop gracefully"
            )

class CustomGLViewWidget(GLViewWidget):
    """GLViewWidget with tunable mouse sensitivity for orbit/zoom/pan."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.mouse_sensitivity = 0.5     # lower than pyqtgraph's default of 1.0
        self.pan_speed = 0.1             # translation speed for right-button drags
        self.mousePos = QtCore.QPoint()  # last known cursor position

    def mousePressEvent(self, ev):
        # Anchor the drag at the press position.
        self.mousePos = ev.pos()

    def mouseMoveEvent(self, ev):
        # Defensive: (re)initialise the anchor if it is somehow missing.
        if not hasattr(self, 'mousePos'):
            self.mousePos = ev.pos()
            return

        delta = ev.pos() - self.mousePos
        self.mousePos = ev.pos()

        buttons = ev.buttons()
        if buttons == QtCore.Qt.LeftButton:
            # Orbit the camera, scaled by the sensitivity factor.
            self.orbit(-delta.x() * self.mouse_sensitivity,
                       delta.y() * self.mouse_sensitivity)
        elif buttons == QtCore.Qt.MiddleButton:
            # Dolly in/out with vertical middle-button drags.
            dy = delta.y()
            if dy != 0:
                self.opts['distance'] *= 1.0 + dy * 0.01 * self.mouse_sensitivity
                self.update()
        elif buttons == QtCore.Qt.RightButton:
            # Pan in the view plane.
            self.pan(delta.x() * self.pan_speed,
                     delta.y() * self.pan_speed, 0, relative='view')

    def wheelEvent(self, ev):
        # Wheel zoom with damped sensitivity.
        step = 0.1 * self.mouse_sensitivity
        if ev.angleDelta().y() > 0:
            factor = 1 - step
        else:
            factor = 1 + step

        self.opts['distance'] *= factor
        self.update()


class InitWorker(QThread):
    """Background initialisation: enumerate UVC devices, query their stream
    formats, open the first device, and read/parse its calibration blob.

    Signals:
        progress(str): human-readable progress messages.
        finished(dict): payload with keys 'devices', 'formats',
            'controller' and 'calib_info'.
        error(str): emitted once if any step raises.
    """
    progress = pyqtSignal(str)  # progress messages
    finished = pyqtSignal(dict)  # completion signal with the init payload
    error = pyqtSignal(str)  # error signal

    @staticmethod
    def _collect_formats(name, formats):
        """Tag each format dict with its device name and sort by
        (width, height) descending, matching the previous inline logic."""
        for config in formats:
            config['name'] = name
        return sorted(
            formats,
            key=lambda x: (x['width'], x['height']),
            reverse=True
        )

    def run(self):
        try:
            init_data = {}

            # 1. Enumerate UVC devices
            self.progress.emit(" [Info] Enumerating UVC devices...")
            controller = RMSLUVCController()
            devices = controller.enumerate_devices()
            init_data['devices'] = devices

            # 2. Query supported formats per device and bucket by stream
            #    type inferred from the device name. Only detected stream
            #    types get a key in formats_dict.
            self.progress.emit(" [Info] Getting supported formats ...")
            formats_dict = {}
            for idx, name in devices:
                enumer = UVCFormatEnumerator(str(name))
                formats = enumer.get_supported_formats()
                lowered = name.lower()
                # 'rgb' is checked first on purpose: order matters for
                # names that could match more than one substring.
                if 'rgb' in lowered:
                    formats_dict['rgb'] = self._collect_formats(name, formats)
                elif 'depth' in lowered or 'disp' in lowered:
                    formats_dict['depth'] = self._collect_formats(name, formats)
                elif 'ir' in lowered:
                    formats_dict['ir'] = self._collect_formats(name, formats)
            init_data['formats'] = formats_dict

            # 3. Open the first device
            self.progress.emit(" [Info] Initializing device...")
            controller.init_device(0)
            init_data['controller'] = controller

            # 4. Read the calibration blob from the device
            self.progress.emit(" [Info] Reading calibration data...")
            calib_params = controller.get_calibration_data(verify_checksum=True)

            # 5. Parse calibration parameters
            self.progress.emit(" [Info] Analyzing calibration parameters...")
            parser = CalibrationParser(calib_params.params)
            all_calib_info = parser.get_all_camera_intrinsic()
            init_data['calib_info'] = all_calib_info

            self.finished.emit(init_data)

        except Exception as e:
            self.error.emit(f" [Error] Failed initialization: {str(e)}")


class RMSLViewer(QWidget, Ui_RMSLViewer):
    def __init__(self):
        """Build the viewer: load config, set up the base UI, then start
        hardware initialisation on a background thread (window stays
        disabled until it completes)."""
        super(RMSLViewer, self).__init__()
        self.logger = init_logger(RMSL_LOG_PATH)
        self.config_file_path = str(PROJECT_ROOT / 'config.json')
        self.save_dir = str(PROJECT_ROOT / 'save')

        self.utils = UtilsFile()
        self.utils_pc = UtilsPointCloud()
        self.config_info = self.utils.read_json(self.config_file_path)
        self.work_mode = 'Normal'
        self.en_debug = self.config_info.get('en_debug', False)

        # State populated once async initialisation finishes.
        self.calib_info = None
        self.controller = None
        self.devices = []
        self.formats = {}

        # Base UI setup (hardware-independent).
        self.setupUi(self)
        self._setup_ui_base()

        self.setEnabled(False)  # disabled until initialisation completes
        self.show()

        """-------------------------------------------------------------------------------------"""
        # Post-processing threads (created when streaming starts).
        self.process_rgb_thread = None
        self.process_dp_thread = None
        self.process_pc_thread = None
        self.process_ir_thread = None
        self.save_thread = None

        """-------------------------------------------------------------------------------------"""
        # Latest frames cached for display.
        self.img_rgb = None
        self.img_depth = None
        self.img_depth_color = None
        self.pointcloud = None

        """-------------------------------------------------------------------------------------"""

        # Kick off async hardware initialisation.
        self._start_initialization()

    def _setup_ui_base(self):
        """One-time, hardware-independent UI setup: icons, message-box
        styling, preview scenes, event filters, widget init and 3D view."""
        # icons change
        self.icon_open_inactive = QIcon(":/image/image/stop.png")
        self.icon_open_active = QIcon(":/image/image/start.png")
        self.icon_lock_inactive = QIcon(":/image/image/unlock.png")
        self.icon_lock_active = QIcon(":/image/image/lock.png")
        self.icon_3d_inactive = QIcon(":/image/image/2d.png")
        self.icon_3d_active = QIcon(":/image/image/3d.png")

        """-------------------------------------------------------------------------------------"""
        # beautiful message box
        self.message_box_style = MessageBox_Style
        self.setup_message_box_style()

        """-------------------------------------------------------------------------------------"""

        # status flags toggled by the toolbar buttons
        self.is_started = False
        self.is_locked = False
        self.is_3d = False

        # scene and image item, for preview.
        self.scene_rgb = QGraphicsScene()
        self.scene_depth = QGraphicsScene()
        self.image_item_rgb = QGraphicsPixmapItem()
        self.image_item_depth = QGraphicsPixmapItem()

        """-------------------------------------------------------------------------------------"""
        # Install event filters to catch clicks on the preview viewports.
        self.view_rgb.viewport().installEventFilter(self)
        self.view_depth.viewport().installEventFilter(self)

        """-------------------------------------------------------------------------------------"""

        self.button_init()
        self.textbrowser_init()
        self.view_init()

        """-------------------------------------------------------------------------------------"""
        # bind logger signal to the on-screen log panel
        self.logger.log_signal.connect(self.log_update)

        """-------------------------------------------------------------------------------------"""

        # 3D View
        self._setup_3d_view()

    def _start_initialization(self):
        """Kick off hardware initialisation on a background QThread."""
        worker = InitWorker()
        self.init_worker = worker  # keep a reference so the thread isn't GC'd
        worker.progress.connect(self._on_init_progress)
        worker.finished.connect(self._on_init_finished)
        worker.error.connect(self._on_init_error)
        worker.start()

    def _on_init_progress(self, message):
        """Relay a progress message from the init worker to the log panel."""
        self.logger.log(message)

    def _on_init_finished(self, init_data):
        """Consume the InitWorker payload and finish UI setup.

        Args:
            init_data: dict with keys 'devices', 'formats', 'controller'
                and 'calib_info' as produced by InitWorker.run().
        """
        try:
            self.devices = init_data['devices']
            self.uvc_config = init_data['formats']
            self.controller = init_data['controller']
            self.calib_info = init_data['calib_info']

            # Module type drives downstream processing choices.
            self.module_category = self.calib_info['category']

            # Placeholders; real names are filled in from the enumerated
            # formats below.
            self.stream_mgr = None
            self.used_uvc_config = {}
            self.cam_name_rgb = 'RMSL321_RGB'
            self.cam_name_depth = 'RMSL321_DEPTH'
            self.cam_name_ir = 'RMSL321_IR'
            # Use .get() per stream: InitWorker only adds a key for stream
            # types it actually detected, so indexing a missing key would
            # raise KeyError. Missing streams keep their placeholder names.
            if self.uvc_config.get('rgb'):
                self.cam_name_rgb = self.uvc_config['rgb'][0]['name']
            if self.uvc_config.get('depth'):
                self.cam_name_depth = self.uvc_config['depth'][0]['name']
            if self.uvc_config.get('ir'):
                self.cam_name_ir = self.uvc_config['ir'][0]['name']

            # Components that depend on hardware data.
            self._setup_lineedit_initialization()
            self._setup_combobox_with_data()

            # Enable the main window now that the device is ready.
            self.setEnabled(True)
            self.logger.log(f" [Info] Success initialization.")

            # ~30 FPS UI refresh timer.
            self.update_timer = QTimer()
            self.update_timer.timeout.connect(self.update_display)
            self.update_timer.start(33)

            self.logger.log(" [Info] Device success initialization.")

        except Exception as e:
            self._on_init_error(f" [Error] Failed initial device: {str(e)}")

    def _on_init_error(self, error_msg):
        """Log the failure, show a modal error dialog, and close the window."""
        self.logger.log(error_msg)
        QMessageBox.critical(self, "Initialization error", error_msg)
        self.close()

    def _setup_stream_manager(self):
        """Create the StreamManager from the user-selected UVC configuration;
        save_rgb_pointcloud toggles colored point-cloud generation."""
        self.stream_mgr = StreamManager(self.used_uvc_config, en_rgb_pointcloud=self.config_info.get('save_rgb_pointcloud', 0))

    def _setup_3d_view(self):
        """Create the 3D point-cloud view (once) and embed it in layout3D."""
        self.glw = CustomGLViewWidget()
        self.glw.setBackgroundColor((20, 20, 20))
        self.glw.opts['fov'] = 60
        self.glw.opts['elevation'] = 20
        self.glw.opts['azimuth'] = 45
        self.glw.opts['distance'] = 2.0

        # Interaction tuning; lower values respond more slowly.
        self.glw.mouse_sensitivity = 0.3
        self.glw.pan_speed = 0.2

        # Fix: reuse an existing layout when the container already has one.
        # The original only assigned self.layout3D_layout when creating a
        # new layout, so addWidget() below would raise AttributeError on a
        # pre-laid-out container.
        existing_layout = self.layout3D.layout()
        if existing_layout is None:
            self.layout3D_layout = QtWidgets.QVBoxLayout(self.layout3D)
            self.layout3D_layout.setContentsMargins(0, 0, 0, 0)
            self.layout3D_layout.setSpacing(0)
        else:
            self.layout3D_layout = existing_layout
        self.layout3D_layout.addWidget(self.glw)

        # Small reference axis, hidden until a point cloud is shown.
        self.axis = gl.GLAxisItem()
        self.axis.setSize(x=0.5, y=0.5, z=0.5)
        self.glw.addItem(self.axis)
        self.axis.hide()
        self.pc_item = gl.GLScatterPlotItem(size=2.0, pxMode=True)
        self.glw.addItem(self.pc_item)

        self._is_first_pointcloud = True

    @staticmethod
    def resize_and_pad(img, target_h, target_w, en_rotate: bool = True):
        """Letterbox *img* into a black (target_h, target_w) canvas.

        The image is optionally rotated 90 degrees clockwise first, then
        scaled uniformly so it fits inside the target rectangle, and finally
        centered on a black canvas of the target size.

        Args:
            img: Source image, a (H, W) or (H, W, C) numpy array.
            target_h: Canvas height in pixels.
            target_w: Canvas width in pixels.
            en_rotate: Rotate 90 degrees clockwise before scaling.

        Returns:
            The padded image with shape (target_h, target_w[, C]) and the
            same dtype as the input.
        """
        if en_rotate:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)

        src_h, src_w = img.shape[:2]

        # Uniform scale: whichever axis is more constrained wins.
        scale = min(target_h / src_h, target_w / src_w)
        new_w = int(src_w * scale)
        new_h = int(src_h * scale)

        # INTER_AREA is preferred when shrinking, INTER_LINEAR when enlarging.
        interp = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
        scaled = cv2.resize(img, (new_w, new_h), interpolation=interp)  # dsize is (w, h)

        # Black canvas with the source's channel layout and dtype
        # (shape[2:] is empty for grayscale, (C,) for multi-channel).
        canvas = np.zeros((target_h, target_w) + img.shape[2:], dtype=img.dtype)

        # Paste the scaled image centered on the canvas.
        top = (target_h - new_h) // 2
        left = (target_w - new_w) // 2
        canvas[top: top + new_h, left: left + new_w] = scaled

        return canvas

    def update_display(self):
        """Timer slot (~30 Hz): pull the latest frames from the stream manager
        and refresh the RGB view, plus either the 2D depth view or the 3D
        point-cloud view depending on the current mode.

        In DEBUG mode the RGB pane shows a 2x2 mosaic of RGB, disparity-diff
        and the two IR images instead of the plain RGB frame.
        """
        if self.stream_mgr is None or not self.stream_mgr.stream_started:
            return

        img_dict = self.stream_mgr.get_ui_data()

        if img_dict['rgb'] is not None:
            self.img_rgb = img_dict['rgb']
            color_frame = self.img_rgb
            if self.en_debug:
                # DEBUG: compose RGB / diff on top, left IR / right IR below
                if (img_dict['ir_left'] is not None
                        and img_dict['ir_right'] is not None
                        and img_dict['disp_diff'] is not None):
                    TARGET_H, TARGET_W = 540, 640
                    # 2x decimation before letterboxing keeps the mosaic cheap
                    rgb_ds = RMSLViewer.resize_and_pad(self.img_rgb[::2, ::2], TARGET_H, TARGET_W, en_rotate=True)
                    ir_l_ds = RMSLViewer.resize_and_pad(img_dict['ir_left'][::2, ::2], TARGET_H, TARGET_W, en_rotate=True)
                    ir_r_ds = RMSLViewer.resize_and_pad(img_dict['ir_right'][::2, ::2], TARGET_H, TARGET_W, en_rotate=True)
                    diff_ds = RMSLViewer.resize_and_pad(img_dict['disp_diff'][::2, ::2], TARGET_H, TARGET_W, en_rotate=False)
                    color_frame = np.vstack([np.hstack([rgb_ds, diff_ds]),
                                             np.hstack([ir_l_ds, ir_r_ds])])

            # QImage wraps the buffer without copying and, when no stride is
            # given, assumes 32-bit-aligned scanlines — widths not divisible
            # by 4 render skewed. Force a contiguous buffer and pass the
            # row stride explicitly.
            color_frame = np.ascontiguousarray(color_frame)
            h, w = color_frame.shape[:2]
            color_img = QImage(color_frame, w, h, color_frame.strides[0], QImage.Format_RGB888)
            if not color_img.isNull():
                self.image_item_rgb.setPixmap(QPixmap.fromImage(color_img))
                self.image_item_rgb.setZValue(0)
                self.show_rgb_frame()

        if not self.is_3d:
            if img_dict['depth_color'] is not None and img_dict['depth'] is not None:
                self.img_depth = img_dict['depth']
                self.img_depth_color = img_dict['depth_color']

            if self.img_depth_color is not None:
                # Same stride/contiguity precaution as for the RGB frame.
                depth_frame = np.ascontiguousarray(self.img_depth_color)
                h, w = depth_frame.shape[:2]
                depth_color = QImage(depth_frame, w, h, depth_frame.strides[0], QImage.Format_RGB888)
                if not depth_color.isNull():
                    self.image_item_depth.setPixmap(QPixmap.fromImage(depth_color))
                    self.image_item_depth.setZValue(0)
                    self.show_depth_frame()
        else:
            if img_dict['pointcloud'] is not None:
                self.pointcloud = img_dict['pointcloud']

            # 3D mode; the attribute may not exist before the first frame,
            # and update_pointcloud_display() tolerates None.
            self.update_pointcloud_display(getattr(self, 'pointcloud', None))

    def setup_message_box_style(self):
        """Append the viewer's message-box stylesheet to the app-wide stylesheet."""
        app = QApplication.instance()
        app.setStyleSheet(app.styleSheet() + self.message_box_style)

    def log_update(self, msg):
        """Append *msg* to the log widget and keep it scrolled to the bottom."""
        self.terminal_log.append(msg)
        bar = self.terminal_log.verticalScrollBar()
        bar.setValue(bar.maximum())

    @staticmethod
    def _uvc_display_text(config):
        """Format a UVC config dict as 'WxH@FPS-format' for combo-box display."""
        return (f"{config['width']}x{config['height']}"
                f"@{int(config['fps'])}-{config['format'].lower()}")

    def _setup_combobox_with_data(self):
        """Populate the mode / RGB / disparity / IR combo boxes.

        Items come from the enumerated UVC configs in ``self.uvc_config``;
        hard-coded fallback configs are used when enumeration returned
        nothing for a camera. Change handlers are connected at the end of
        each section so populating the boxes does not fire them.
        """
        # --- work-mode combo ---
        self.comboBox_mode.setEnabled(True)
        self.comboBox_mode.clear()
        self.comboBox_mode.addItems(['Normal', 'Calibration', 'DEBUG'])
        self.comboBox_mode.setCurrentIndex(2 if self.en_debug else 0)
        self.comboBox_mode.currentTextChanged.connect(self.update_work_mode)

        # --- RGB resolution combo ---
        self.comboBox_rgb_resolution.setEnabled(True)
        self.comboBox_rgb_resolution.clear()

        if self.en_debug:
            # TODO: DEBUG MODE — single fixed debug resolution
            config_rgb = {'name': self.cam_name_rgb, 'format': 'nv12', 'width': 1080, 'height': 1280, 'fps': 30.0}
            self.used_uvc_config['rgb'] = config_rgb
            self.comboBox_rgb_resolution.addItem(self._uvc_display_text(config_rgb), config_rgb)
        elif self.uvc_config['rgb'] is not None:
            self.used_uvc_config['rgb'] = self.uvc_config['rgb'][0]
            for config in self.uvc_config['rgb']:
                self.comboBox_rgb_resolution.addItem(self._uvc_display_text(config), config)
        else:
            # Fallback when RGB enumeration returned nothing
            config_rgb = {'name': self.cam_name_rgb, 'format': 'nv12', 'width': 1280, 'height': 1080, 'fps': 30.0}
            self.used_uvc_config['rgb'] = config_rgb
            self.comboBox_rgb_resolution.addItem(self._uvc_display_text(config_rgb), config_rgb)

        self.comboBox_rgb_resolution.setCurrentIndex(0)
        self.comboBox_rgb_resolution.currentTextChanged.connect(self.update_uvc_rgb_config)

        # --- disparity/depth resolution combo ---
        self.comboBox_disp_resolution.setEnabled(True)
        self.comboBox_disp_resolution.clear()

        if self.uvc_config['depth'] is not None:
            self.used_uvc_config['depth'] = self.uvc_config['depth'][0]
            for config in self.uvc_config['depth']:
                self.comboBox_disp_resolution.addItem(self._uvc_display_text(config), config)
        else:
            config_disp = {'name': self.cam_name_depth, 'format': 'yuyv422', 'width': 640, 'height': 352, 'fps': 10.0}
            self.used_uvc_config['depth'] = config_disp
            self.comboBox_disp_resolution.addItem(self._uvc_display_text(config_disp), config_disp)

        self.comboBox_disp_resolution.setCurrentIndex(0)
        self.comboBox_disp_resolution.currentTextChanged.connect(self.update_uvc_disp_config)

        # --- IR resolution combo (selectable only in DEBUG mode) ---
        if self.en_debug:
            self.comboBox_ir_resolution.setEnabled(True)
            self.comboBox_ir_resolution.clear()
            if self.uvc_config['ir'] is not None:
                self.used_uvc_config['ir'] = self.uvc_config['ir'][0]
                for config in self.uvc_config['ir']:
                    self.comboBox_ir_resolution.addItem(self._uvc_display_text(config), config)
            else:
                config_ir = {'name': self.cam_name_ir, 'format': 'nv12', 'width': 1080, 'height': 1280, 'fps': 10.0}
                self.used_uvc_config['ir'] = config_ir
                self.comboBox_ir_resolution.addItem(self._uvc_display_text(config_ir), config_ir)

            self.comboBox_ir_resolution.setCurrentIndex(0)
            self.comboBox_ir_resolution.currentTextChanged.connect(self.update_uvc_ir_config)
        else:
            # Outside DEBUG the IR config is fixed and the combo is disabled.
            config_ir = {'name': self.cam_name_ir, 'format': 'nv12', 'width': 1080, 'height': 1280, 'fps': 10.0}
            self.used_uvc_config['ir'] = config_ir
            self.comboBox_ir_resolution.addItem(self._uvc_display_text(config_ir), config_ir)
            self.comboBox_ir_resolution.setEnabled(False)

    def update_work_mode(self, mode_text):
        """Slot for the work-mode combo.

        Switching to DEBUG pins a fixed debug RGB resolution; switching back
        repopulates the RGB combo from the enumerated configs (or a fallback).
        """
        self.work_mode = mode_text

        if mode_text == 'DEBUG':
            self.en_debug = True
            self.comboBox_rgb_resolution.clear()

            # TODO: DEBUG
            cfg = {'name': self.cam_name_rgb, 'format': 'nv12', 'width': 1080, 'height': 1280, 'fps': 30.0}
            label = f"{cfg['width']}x{cfg['height']}@{int(cfg['fps'])}-{cfg['format'].lower()}"
            self.comboBox_rgb_resolution.addItem(label, cfg)
            return

        self.en_debug = False
        self.comboBox_rgb_resolution.clear()

        if self.uvc_config['rgb'] is not None:
            for cfg in self.uvc_config['rgb']:
                label = f"{cfg['width']}x{cfg['height']}@{int(cfg['fps'])}-{cfg['format'].lower()}"
                self.comboBox_rgb_resolution.addItem(label, cfg)
        else:
            cfg = {'name': self.cam_name_rgb, 'format': 'nv12', 'width': 1280, 'height': 1080, 'fps': 30.0}
            label = f"{cfg['width']}x{cfg['height']}@{int(cfg['fps'])}-{cfg['format'].lower()}"
            self.comboBox_rgb_resolution.addItem(label, cfg)

    def update_uvc_rgb_config(self, text):
        """Slot for the RGB resolution combo.

        Rescales the RGB intrinsics to the newly selected resolution,
        refreshes the alignment coefficients and remembers the config for
        the next stream start.
        """
        selected = self.comboBox_rgb_resolution.currentData()
        if not selected:
            return

        # Rescale intrinsics, then refresh the alignment coefficients.
        scaled = CalibrationScaler.scale_intrinsic(
            calib_info=self.calib_info.copy(),
            target_width=selected['width'],
            target_height=selected['height'],
            camera_type='rgb'
        )
        self.calib_info = CalibrationScaler.update_alignment_coeffs(scaled)

        self.used_uvc_config['rgb'] = selected
        self.logger.log(
            f" [Info] Update UVC config: {selected['name']}: "
            f"{selected['width']}x{selected['height']}"
            f"@{int(selected['fps'])}-{selected['format'].lower()}"
        )

    def update_uvc_disp_config(self, text):
        """Slot for the disparity/depth resolution combo.

        Rescales the depth intrinsics to the newly selected resolution,
        refreshes the alignment coefficients and remembers the config for
        the next stream start.
        """
        selected = self.comboBox_disp_resolution.currentData()
        if not selected:
            return

        # Rescale intrinsics, then refresh the alignment coefficients.
        scaled = CalibrationScaler.scale_intrinsic(
            calib_info=self.calib_info.copy(),
            target_width=selected['width'],
            target_height=selected['height'],
            camera_type='depth'
        )
        self.calib_info = CalibrationScaler.update_alignment_coeffs(scaled)

        self.used_uvc_config['depth'] = selected
        self.logger.log(
            f" [Info] Update UVC config: {selected['name']}: "
            f"{selected['width']}x{selected['height']}"
            f"@{int(selected['fps'])}-{selected['format'].lower()}"
        )

    def update_uvc_ir_config(self, text):
        """Slot for the IR resolution combo: remember the selected config
        for the next stream start (IR needs no intrinsics rescale here)."""
        selected = self.comboBox_ir_resolution.currentData()
        if not selected:
            return

        self.used_uvc_config['ir'] = selected
        self.logger.log(
            f" [Info] Update UVC config: {selected['name']}: "
            f"{selected['width']}x{selected['height']}"
            f"@{int(selected['fps'])}-{selected['format'].lower()}"
        )

    def _setup_lineedit_initialization(self):
        """Fill the read-only device-code line edit with 'PREFIX_RK-<serial>'."""
        self.lineedit_device_code.setEnabled(True)
        self.lineedit_device_code.setReadOnly(True)
        self.lineedit_device_code.clear()

        # Prefix comes from the RGB camera name when enumeration succeeded.
        if self.uvc_config['rgb'] is not None:
            prefix = self.uvc_config['rgb'][0]['name'].split('_')[0]
        else:
            prefix = 'RMSL321'

        # FIXME: should always come from calib_info; the literal is a placeholder.
        if 'sn_code' in self.calib_info:
            sn_code = f"{prefix}_RK-" + self.calib_info['sn_code']
        else:
            sn_code = f"{prefix}_RK-25042-1117"

        self.lineedit_device_code.setText(sn_code)

    def button_init(self):
        """Enable the toolbar buttons, set initial icons and hook up slots."""
        # (button, initial icon or None, click handler) in activation order
        wiring = (
            (self.pushButton_performance, None, self.performance_tracking),
            (self.pushButton_setting, None, self.setting_params),
            (self.pushButton_help, None, self.help_page),
            (self.pushButton_start, self.icon_open_inactive, self.start_and_stop_process),
            (self.pushButton_view_mode, self.icon_3d_inactive, self.view_mode_change),
            (self.pushButton_lock, self.icon_lock_inactive, self.lock_and_unlock_view),
            (self.pushButton_download, None, self.save_data),
        )
        for button, icon, handler in wiring:
            button.setEnabled(True)
            if icon is not None:
                button.setIcon(icon)
            button.clicked.connect(handler)

    def performance_tracking(self):
        """Slot for the performance button; not implemented yet (no-op)."""
        # TODO: implement the performance-analysis panel
        pass

    def setting_params(self):
        """Slot for the settings button; not implemented yet (no-op)."""
        # TODO: implement the settings dialog
        pass

    def help_page(self):
        """Slot for the help button; not implemented yet (no-op)."""
        # TODO: implement the help page
        pass

    def textbrowser_init(self):
        """Configure the log browser: read-only, text-selectable, links enabled."""
        log_widget = self.terminal_log
        log_widget.setReadOnly(True)
        interaction = Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard
        log_widget.setTextInteractionFlags(interaction)
        log_widget.setOpenExternalLinks(True)
        log_widget.setPlaceholderText(" ⏳ RMSLViewer Log terminal, waiting...")

    def view_init(self):
        """Configure render hints, optimization flags and update mode for the
        RGB and depth graphics views, then center each view on its item.

        Bug fix: QGraphicsView.setOptimizationFlags() REPLACES the whole
        flag set — the original two back-to-back calls left only
        DontSavePainterState active. The flags are now OR-ed together so
        both take effect.
        """
        opt_flags = (QGraphicsView.DontAdjustForAntialiasing
                     | QGraphicsView.DontSavePainterState)

        for view in (self.view_rgb, self.view_depth):
            view.setRenderHint(QPainter.SmoothPixmapTransform)
            view.setRenderHint(QPainter.Antialiasing)
            view.setOptimizationFlags(opt_flags)
            view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)

        # center visual
        self.view_rgb.centerOn(self.image_item_rgb)
        self.view_rgb.setAlignment(Qt.AlignCenter)
        self.view_depth.centerOn(self.image_item_depth)
        self.view_depth.setAlignment(Qt.AlignCenter)

    def start_and_stop_process(self):
        """Toggle streaming: start the pipeline on one click, stop on the next."""
        if self.is_started:
            # Switch the icon back and tear the pipeline down.
            self.pushButton_start.setIcon(self.icon_open_inactive)
            self.stop_view()
            self.is_started = False
        else:
            # Activate the icon and bring the pipeline up.
            self.pushButton_start.setIcon(self.icon_open_active)
            self.start_view()
            self.is_started = True

    def start_view(self):
        """Spin up the stream manager and all processing/saving worker threads."""
        self._setup_stream_manager()
        self.stream_mgr.start()

        # Processing workers: RGB (no undistortion), disparity, point cloud.
        self.process_rgb_thread = ProcessRgbThread(self.stream_mgr, self.calib_info, self.config_info, en_undistort=False)
        self.process_rgb_thread.start()

        self.process_dp_thread = ProcessDispThread(self.stream_mgr, self.calib_info, self.config_info)
        self.process_dp_thread.start()

        self.process_pc_thread = ProcessPointCloudThread(self.stream_mgr, self.calib_info, self.config_info)
        self.process_pc_thread.start()

        # IR frames are only processed in DEBUG mode.
        if self.en_debug == 1:
            self.process_ir_thread = ProcessIRThread(self.stream_mgr)
            self.process_ir_thread.start()

        self.save_thread = SaveDataThread(self.stream_mgr, self.save_dir, self.config_info)
        self.save_thread.start()

    def stop_view(self):
        """Stop all worker threads, then shut down the stream manager."""
        # Saving first, so no frame is persisted after processing halts.
        self.save_thread.stop()
        if self.en_debug == 1:
            self.process_ir_thread.stop()

        # Processing workers, then the source of frames.
        self.process_pc_thread.stop()
        self.process_rgb_thread.stop()
        self.process_dp_thread.stop()
        self.stream_mgr.stop()

    def view_mode_change(self):
        """Flip between the 2D depth-image page and the 3D point-cloud page."""
        if not self.is_3d:
            # Enter point-cloud visualization mode.
            self.is_3d = True
            self.pushButton_view_mode.setIcon(self.icon_3d_active)
            self.depthStack.setCurrentIndex(1)
            self.stream_mgr.mode_3d = True
            self.logger.log(" 🎉 [Info] Convert to 3D-PointCloud visual mode.")
        else:
            # Back to the 2D depth image.
            self.is_3d = False
            self.stream_mgr.mode_3d = False
            self.pushButton_view_mode.setIcon(self.icon_3d_inactive)
            self.depthStack.setCurrentIndex(0)
            self.logger.log(" 🎉 [Info] Convert to 2D-Image visual mode.")

    def lock_and_unlock_view(self):
        """Toggle the display freeze.

        Locking stops the UI refresh timer so update_display no longer
        fires; unlocking restarts it at the same 33 ms (~30 Hz) interval
        used at startup.

        Bug fix: the original unlock path called ``update_timer.start(1000 / 33)``
        with a float argument — PyQt5's QTimer.start(int) rejects non-integer
        values with a TypeError, and the interval also disagreed with the
        33 ms used everywhere else.
        """
        if not self.is_locked:
            # Freeze: stop the timer so the UI no longer refreshes.
            self.update_timer.stop()
            self.is_locked = True
            self.pushButton_lock.setIcon(self.icon_lock_active)
            self.logger.log(" 🎉 [Info] Lock view.")
        else:
            self.update_timer.start(33)
            self.is_locked = False
            self.pushButton_lock.setIcon(self.icon_lock_inactive)
            self.logger.log(" 🎉 [Info] Unlock view.")

    def save_data(self):
        """Request a one-shot data capture.

        Enables full point-cloud computation on the point-cloud worker and
        flags the save thread to persist the next frame set.
        """
        self.process_pc_thread.en_full_pointcloud_calculate(True)
        self.save_thread.set_save_flag(True)

    def update_pointcloud_display(self, pointcloud):
        """Feed a point cloud into the GL scatter item.

        On the first non-empty cloud the camera is repositioned to frame the
        data; empty results clear the scatter item. A None input is ignored.
        """
        if pointcloud is None:
            return

        # Nx3 clouds carry no color column; pass colors=None explicitly.
        if pointcloud.shape[1] == 3:
            pos, rgba = self.utils_pc.prepare_glscatter_data(pointcloud, colors=None)
        else:
            pos, rgba = self.utils_pc.prepare_glscatter_data(pointcloud)

        if not pos.size:
            # Nothing to draw: clear the item.
            self.pc_item.setData(pos=np.zeros((0, 3), dtype=np.float32), color=None)
            return

        self.pc_item.setData(pos=pos, color=rgba)

        if self._is_first_pointcloud and len(pos) > 0:
            lo = np.min(pos, axis=0)
            hi = np.max(pos, axis=0)
            mid = (lo + hi) / 2
            extent = np.max(hi - lo)

            # Frame the cloud once; afterwards the user controls the camera.
            self.glw.setCameraPosition(
                pos=pg.Vector(mid[0], mid[1], mid[2]),
                distance=extent * 1.0,
                elevation=20,
                azimuth=45
            )
            self._is_first_pointcloud = False

    def show_depth_frame(self):
        """Ensure the depth item is in its scene, then fit the view around it."""
        view, scene, item = self.view_depth, self.scene_depth, self.image_item_depth
        if not view.scene():
            scene.addItem(item)
        view.setScene(scene)
        view.fitInView(item, Qt.KeepAspectRatio)

    def show_rgb_frame(self):
        """Ensure the RGB item is in its scene, then fit the view around it."""
        view, scene, item = self.view_rgb, self.scene_rgb, self.image_item_rgb
        if not view.scene():
            scene.addItem(item)
        view.setScene(scene)
        view.fitInView(item, Qt.KeepAspectRatio)

    def resizeEvent(self, event):
        """Re-fit both graphics views whenever the window is resized."""
        super().resizeEvent(event)

        if not self.image_item_rgb.pixmap().isNull():
            self.view_rgb.fitInView(self.image_item_rgb, Qt.KeepAspectRatio)

        # Depth view only needs refitting while the 2D page is visible.
        showing_2d = self.depthStack.currentWidget() is self.view2D
        if showing_2d and not self.image_item_depth.pixmap().isNull():
            self.view_depth.fitInView(self.image_item_depth, Qt.KeepAspectRatio)

    def closeEvent(self, event):
        """Shut down the refresh timer, worker threads and the stream manager
        before the window closes.

        Every stop is guarded with getattr because start_view() may never
        have run. Improvement over the original: the UI refresh timer is
        stopped first, so update_display cannot fire mid-teardown.
        """
        timer = getattr(self, "update_timer", None)
        if timer is not None:
            timer.stop()

        # Stop workers in the same order as before: saving, point cloud,
        # disparity, RGB.
        for attr in ("save_thread", "process_pc_thread",
                     "process_dp_thread", "process_rgb_thread"):
            worker = getattr(self, attr, None)
            if worker:
                worker.stop()

        if self.en_debug == 1:
            ir_worker = getattr(self, "process_ir_thread", None)
            if ir_worker:
                ir_worker.stop()

        if getattr(self, "stream_mgr", None):
            self.stream_mgr.stop()

        super().closeEvent(event)
        event.accept()

    def eventFilter(self, obj, event):
        """While the view is locked, intercept left-clicks on the image
        viewports to show a pixel tooltip; everything else is delegated
        to the default filter."""
        if not self.is_locked:
            return super().eventFilter(obj, event)

        left_press = (event.type() == QEvent.MouseButtonPress
                      and event.button() == Qt.LeftButton)
        if left_press:
            if obj is self.view_rgb.viewport():
                if self._show_image_tooltip(self.view_rgb, self.image_item_rgb, event):
                    return True
            elif obj is self.view_depth.viewport():
                # Depth view also reports distance at the clicked pixel.
                if self._show_image_tooltip(self.view_depth, self.image_item_depth, event, en_distance=True):
                    return True

        return super().eventFilter(obj, event)

    def _show_image_tooltip(self, view: QGraphicsView, item: QGraphicsPixmapItem, event, en_distance: bool = False) -> bool:
        """Show a tooltip with the clicked pixel's coordinates and RGB value;
        for the depth view, also show the clamped distance in millimeters.

        Args:
            view: The graphics view that received the click.
            item: The pixmap item displayed in that view.
            event: The mouse event, in viewport coordinates.
            en_distance: If True, also look up the distance in self.img_depth.

        Returns:
            True if a tooltip was shown for a point on the image; False when
            the click missed the image, the pixmap is empty, or (with
            en_distance) no depth data is available yet.
        """
        if item is None:
            return False

        pix = item.pixmap()
        if pix.isNull():
            return False

        vp_pos = event.pos()  # QPoint (Qt5)
        # 1) Hit test: only react when the click actually lands on the image
        #    item, so clicks in the blank/letterbox margin are ignored.
        hit_item = view.itemAt(vp_pos)
        if hit_item is not item:
            return False

        # 2) Map viewport -> scene -> item coordinates
        scene_pos = view.mapToScene(vp_pos)
        item_pos = item.mapFromScene(scene_pos)  # QPointF (item-local, unscaled logical coordinates)

        # 3) Bounds-check with floats first: int truncation would fold small
        #    negative coordinates onto 0 and fake a hit.
        dpr = pix.devicePixelRatio()  # HiDPI
        img_w = pix.width() / dpr
        img_h = pix.height() / dpr

        fx = item_pos.x()
        fy = item_pos.y()
        if not (0.0 <= fx < img_w and 0.0 <= fy < img_h):
            return False

        # 4) Convert to integer pixel indices (floor)
        ix = int(fx * dpr)
        iy = int(fy * dpr)

        img = pix.toImage()
        # Qt5 normally provides pixelColor; fall back to img.pixel + QColor otherwise
        if hasattr(img, "pixelColor"):
            c = img.pixelColor(ix, iy)
        else:
            rgb = img.pixel(ix, iy)
            c = QColor(rgb)

        # 5) Use the viewport as the tooltip's widget & coordinate base so the
        #    tooltip stays positioned correctly after the window is scaled.
        gpos = view.viewport().mapToGlobal(vp_pos)
        if en_distance:
            if self.img_depth is None or self.img_depth.size == 0:
                QToolTip.showText(gpos,
                                  f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})",
                                  view.viewport())
                return False

            depth_h, depth_w = self.img_depth.shape[:2]

            # Map the displayed coordinate onto the depth-map grid, which may
            # have a different resolution than the displayed pixmap.
            normalized_x = fx / img_w
            normalized_y = fy / img_h

            real_pos_x = int(normalized_x * depth_w)
            real_pos_y = int(normalized_y * depth_h)

            real_pos_x = min(real_pos_x, depth_w - 1)
            real_pos_y = min(real_pos_y, depth_h - 1)

            min_depth_dis = self.config_info.get('min_distance_mm', 100)  # mm
            max_depth_dis = self.config_info.get('max_distance_mm', 5000)  # mm
            distance = np.clip(self.img_depth[real_pos_y, real_pos_x], min_depth_dis, max_depth_dis)

            QToolTip.showText(gpos,
                              f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})  Distance: {distance:.2f} mm",
                              view.viewport())
        else:
            QToolTip.showText(gpos,
                              f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})",
                              view.viewport())
        return True

if __name__ == '__main__':
    # High-DPI attributes must be set before the QApplication is created.
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)

    # PassThrough rounding is only available on newer Qt releases.
    if hasattr(Qt, 'HighDpiScaleFactorRoundingPolicy'):
        QApplication.setHighDpiScaleFactorRoundingPolicy(
            Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)

    app = QApplication(sys.argv)
    app.setStyleSheet("""
        QToolTip {
            background-color: rgb(230, 230, 230);
            color: black;
            border: None;
            font-family: "YaHei Consolas Hybrid";
            font-size: 8pt;
        }
        """)

    # NOTE(review): the viewer is never .show()n here — presumably
    # RMSLViewer.__init__ shows the window itself; confirm.
    demo = RMSLViewer()
    # Propagate Qt's exit code to the OS instead of discarding it.
    sys.exit(app.exec_())