"""
!/usr/bin/env python
-*- coding: utf-8 -*-
@CreateTime : 2025/7/14 14:31
@Author  :    AnimateX
@Contact :    animatex@163.com
@File    :    rmsl_viewer.py
@License :    Copyright © 2024 AnimateX. All rights reserved.
@Version :    rmsl_viewer_2025/7/14.0.1

-------------------------------------------------------------------------------
# @Description:
    Show disp/depth/pointcloud
    ---------------------------------------------------------------------------
    [Update History]:
        2025/7/14:

-------------------------------------------------------------------------------
"""
import functools
import json
import os
import queue
import struct
import subprocess
import sys
import threading
import time
from collections import deque
from datetime import datetime
from pathlib import Path
from typing import Tuple, Dict, Optional, Union, Any

import cv2
import numpy as np
from PyQt5.QtCore import QObject, QTimer, Qt, pyqtSignal, QThread, pyqtSlot
from PyQt5.QtGui import QPixmap, QImage, QPainter, QIcon
from PyQt5.QtWidgets import (QWidget, QGraphicsScene, QGraphicsView, QApplication, QMessageBox, QGraphicsPixmapItem, QFileDialog)

from viewer_rc import Ui_RMSLViewer


def get_project_root():
    """Resolve the directory that contains this script.

    Falls back to the launch script's directory (sys.argv[0]) when
    ``__file__`` is not defined (e.g. some frozen/interactive contexts).
    """
    try:
        anchor = globals()["__file__"]
    except KeyError:
        anchor = sys.argv[0]
    return Path(anchor).resolve().parent


PROJECT_ROOT = get_project_root()
# Log file written next to the executable/script.
RMSL_LOG_PATH = PROJECT_ROOT / "viewer.log"
# Bundled adb binary used for device communication (Windows layout).
ADB = str(PROJECT_ROOT / "adb/adb.exe")

# Global Qt stylesheet applied to QMessageBox instances (visual polish).
# NOTE: the Chinese comments inside the string are part of the QSS text
# and are left untouched (Qt ignores /* */ comments).
MessageBox_Style = """
        QMessageBox {
            background-color: #F5F5F5;
            font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
            font-size: 10px;
            padding: 10px;
            spacing: 8px; /* 控制图标与文本之间的基础间距 */
        }
        QMessageBox QLabel {
            color: #333333;
            font-size: 12px;
            min-width: 0px; /* 必须保留 */
            margin-left: 0px; /* 比之前减少负边距 */
            padding-left: 0px;
        }
        /* 关键修复：调整图标容器 */
        QMessageBox::icon-label {
            min-width: 32px;  /* 放大图标容器 */
            min-height: 32px;
            margin-right: 0px;
            padding: 0px;
            qproperty-alignment: AlignCenter; /* 确保图标居中 */
        }
        /* 按钮区域样式保持不变 */
        QMessageBox QPushButton {
            background-color: #4A90E2;
            font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
            font-size: 12px;
            color: white;
            border-radius: 4px;
            padding: 5px 15px;
            outline: none;
            border: none;
            min-width: 80px;
            min-height: 25px;
        }
        QMessageBox QPushButton:hover {
            background-color: #5A9AE8;
        }
        QMessageBox QPushButton:pressed {
            background-color: #3A80D2;
        }
        """


def timeit(func):
    """Decorator that prints the wall-clock duration of each call.

    Uses functools.wraps so the wrapped function keeps its original
    __name__/__doc__ (the old version clobbered them, which also made
    the printed label show 'wrapper' when decorators were stacked).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        print(f"{func.__name__} 耗时: {end - start:.6f} 秒")
        return result
    return wrapper


class Logger(QObject):
    """Qt-aware logger: emits each message on log_signal and optionally
    appends it (timestamped at line starts) to a log file.
    """
    log_signal = pyqtSignal(str)

    def __init__(self, log_file_path=None):
        super().__init__()
        self.log_file_path = log_file_path
        # File logging is active only when a path was supplied.
        self.file_logging_enabled = log_file_path is not None
        self._current_line_start = True  # True when the next write begins a new line

    def _write_to_file(self, msg, with_newline=True, with_timestamp=True):
        """Append *msg* to the log file.

        A timestamp is prepended only at the start of a physical line so
        that partial writes (with_newline=False) can build up one line.

        BUGFIX: the old except-handler assembled an error string into a
        local and silently discarded it; the failure is now reported to
        stderr so file-system problems are visible.
        """
        if not self.file_logging_enabled:
            return

        try:
            with open(self.log_file_path, 'a', encoding='utf-8') as f:
                # Only prepend the timestamp at the start of a line.
                if with_timestamp and self._current_line_start:
                    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f"[{timestamp}] ")

                f.write(msg)

                if with_newline:
                    f.write('\n')
                    self._current_line_start = True
                else:
                    self._current_line_start = False

                f.flush()
        except Exception as e:
            print(f" [Error] Can not open log file. Error info: {e}", file=sys.stderr)

    def log(self, msg):
        """Emit *msg* to GUI listeners and append it to the log file."""
        self.log_signal.emit(msg)
        self._write_to_file(msg, with_newline=True)

    def log_to_file(self, msg):
        """Append *msg* to the log file only (no GUI signal)."""
        self._write_to_file(msg, with_newline=True)


# Process-wide Logger singleton; created lazily by init_logger().
logger = None


def init_logger(log_file_path=None):
    """Return the shared Logger, creating it on first use.

    Later calls ignore log_file_path — the first caller's path wins.
    """
    global logger
    if logger is None:
        logger = Logger(log_file_path)
    return logger


class SensorDataMatcher:
    """Pairs RGB frames with depth frames by nearest-earlier timestamp.

    Producers push into two bounded deques (typically from different
    threads — both buffers are guarded by one lock); a consumer calls
    find_best_match() to obtain an (rgb, depth) pair whose timestamps
    differ by at most max_time_diff. Timestamp units are whatever the
    producers supply — presumably milliseconds given the 200 default;
    TODO confirm against the capture threads.
    """
    def __init__(self, max_time_diff=200, max_queue_size=20):
        self.rgb_buffer = deque()
        self.depth_buffer = deque()
        self.max_time_diff = max_time_diff    # max acceptable timestamp gap
        self.max_queue_size = max_queue_size  # manual bound (deques created without maxlen)
        self.lock = threading.Lock()          # guards both buffers
        self.logger = init_logger(RMSL_LOG_PATH)

    def add_rgb_data(self, rgb_img, timestamp):
        """Append an RGB frame; the oldest entry is dropped when full."""
        with self.lock:
            self.rgb_buffer.append({
                'img': rgb_img,
                'timestamp': timestamp
            })
            if len(self.rgb_buffer) > self.max_queue_size:
                self.rgb_buffer.popleft()

    def add_depth_data(self, disp_img, depth_img, timestamp, pse_color_depth_img=None):
        """Append a depth frame (disparity/depth/optional pseudo-color)."""
        with self.lock:
            self.depth_buffer.append({
                'disp': disp_img,
                'depth': depth_img,
                'pse_color': pse_color_depth_img,
                'timestamp': timestamp
            })
            if len(self.depth_buffer) > self.max_queue_size:
                self.depth_buffer.popleft()

    def find_best_match(self):
        """Return a matched (rgb_entry, depth_entry) pair or (None, None).

        Strategy: pop the newest depth frame and scan the RGB buffer
        newest-first for the closest RGB frame that is not newer than the
        depth frame. On success the matched RGB entry is removed from the
        buffer; on failure the depth frame is pushed back for retry.
        """
        with self.lock:
            if not self.rgb_buffer or not self.depth_buffer:
                return None, None

            # Take the newest depth frame as the matching target.
            latest_depth = self.depth_buffer.pop()
            target_timestamp = latest_depth['timestamp']

            # Scan RGB frames newest-first (buffers are time-ordered).
            best_rgb = None
            best_rgb_index = -1
            min_time_diff = float('inf')

            for i, rgb_data in enumerate(reversed(self.rgb_buffer)):
                rgb_timestamp = rgb_data['timestamp']

                # Frames far older than the target cannot improve; stop.
                if target_timestamp - rgb_timestamp > self.max_time_diff * 2:
                    break

                if rgb_timestamp <= target_timestamp:
                    time_diff = target_timestamp - rgb_timestamp
                    if time_diff < min_time_diff:
                        min_time_diff = time_diff
                        best_rgb = rgb_data
                        # Note: a reversed() index must be mapped back to buffer order.
                        best_rgb_index = len(self.rgb_buffer) - 1 - i

                        if time_diff < 5:
                            break

            if best_rgb and min_time_diff <= self.max_time_diff:
                # Safely remove the matched RGB entry.
                if 0 <= best_rgb_index < len(self.rgb_buffer):
                    del self.rgb_buffer[best_rgb_index]

                return best_rgb, latest_depth
            else:
                # No acceptable match: put the depth frame back for later.
                self.depth_buffer.append(latest_depth)
                return None, None


class UVCReader(threading.Thread):
    """Daemon thread that captures raw frames from a UVC camera via FFmpeg.

    FFmpeg is spawned writing raw video to stdout; each complete frame
    (width * height * 2 bytes for the 2-bytes-per-pixel yuyv422 format)
    is appended to a bounded deque that consumers drain via get_frame().
    """
    def __init__(self,
                 camera_name: str = 'UVC Camera',
                 width: int = 1280,
                 height: int = 1080,
                 fps: int = 30,
                 pixel_format: str = 'yuyv422',
                 buffer_size: int = 5,
                 ffmpeg_path: str = './ffmpeg/ffmpeg.exe'):
        super().__init__()
        self.logger = init_logger(RMSL_LOG_PATH)
        self.daemon = True  # never block interpreter shutdown

        self.width = width
        self.height = height
        self.pixel_format = pixel_format
        # yuyv422 packs 2 bytes per pixel.
        self.frame_size = width * height * 2
        # Bounded: the oldest frame is dropped automatically when full.
        self.buffer = deque(maxlen=buffer_size)
        self._stop_event = threading.Event()

        self.ffmpeg_command = [
            ffmpeg_path,
            '-f', 'dshow',
            '-rtbufsize', '300M',
            '-video_size', f'{self.width}x{self.height}',
            '-framerate', str(fps),
            '-pixel_format', self.pixel_format,
            '-i', f'video={camera_name}',
            '-c:v', 'rawvideo',
            '-f', 'image2pipe',
            '-'
        ]

        self.process = None

    def run(self):
        """Read fixed-size frames from FFmpeg stdout until stopped."""
        try:
            self.process = subprocess.Popen(
                self.ffmpeg_command,
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL
            )

            self.logger.log_to_file(" [Info] UVC reader thread started.")

            while not self._stop_event.is_set():
                chunk = self.process.stdout.read(self.frame_size)

                if len(chunk) == self.frame_size:
                    self.buffer.append(chunk)
                elif self.process.poll() is not None:
                    self.logger.log_to_file(" [Warn] FFMPEG process has terminated.")
                    break
                else:
                    self.logger.log_to_file(" [Info] Incomplete frame data received, might be end of stream.")
                    time.sleep(0.01)

        except FileNotFoundError:
            self.logger.log_to_file(
                f" [Error] FFMPEG executable not found at '{self.ffmpeg_command[0]}'. Please check the path.")
        except Exception as e:
            self.logger.log_to_file(f" [Error] An exception occurred in the reader thread: {e}")
            # BUGFIX: stderr is redirected to DEVNULL, so self.process.stderr
            # is None; the old code raised AttributeError here and masked
            # the original exception.
            if self.process and self.process.stderr is not None:
                self.logger.log_to_file(f" [Error] FFMPEG stderr: {self.process.stderr.read().decode()}")
        finally:
            if self.process and self.process.poll() is None:
                self.process.terminate()
                self.process.wait()
            self.logger.log_to_file(" [Info] UVC reader thread stopped.")

    def get_frame(self):
        """Pop and return the oldest buffered frame, or None when empty."""
        try:
            return self.buffer.popleft()
        except IndexError:
            return None

    def stop(self):
        """Request shutdown and wait (up to 5 s) for the thread to exit.

        BUGFIX: also terminate FFmpeg here — a blocking stdout.read()
        would otherwise keep the thread alive past the join timeout.
        (run()'s finally-block guard makes the double-terminate safe.)
        """
        self._stop_event.set()
        if self.process and self.process.poll() is None:
            self.process.terminate()
        self.join(timeout=5)
        self.logger.log_to_file(f" [Info] UVC Reader thread stopped.")


class ColorizerNew:
    """LUT-based pseudo-color renderer for depth images.

    A lut_size x 3 RGB table is built by linearly interpolating a small
    set of jet color stops; depth maps are then colorized by indexing
    into that table. All image outputs are uint8 in BGR channel order
    (OpenCV convention).
    """

    def __init__(self,
                 color_map: Optional[Dict[float, np.ndarray]] = None,
                 lut_size: int = 65536,
                 mode='jet_red2blue'):
        # A falsy/missing map falls back to the built-in jet stops.
        self.color_map = color_map if color_map else ColorizerNew.default_jet_map(mode=mode)
        self.lut_size = lut_size
        self.lut = None
        self._build_lut()

    @staticmethod
    def default_jet_map(mode='jet_red2blue'):
        """Return the built-in jet stops, red-to-blue or blue-to-red."""
        red_first = {
            0.00: np.array([0.5, 0, 0]),  # dark red
            0.11: np.array([1, 0, 0]),  # red
            0.35: np.array([1, 1, 0]),  # yellow
            0.50: np.array([0, 1, 0]),  # green
            0.64: np.array([0, 1, 1]),  # cyan
            0.86: np.array([0, 0, 1]),  # blue
            1.00: np.array([0, 0, 0.5]),  # dark blue
        }
        blue_first = {
            0.00: np.array([0, 0, 0.5]),  # dark blue
            0.11: np.array([0, 0, 1]),  # blue
            0.35: np.array([0, 1, 1]),  # cyan
            0.50: np.array([0, 1, 0]),  # green
            0.64: np.array([1, 1, 0]),  # yellow
            0.86: np.array([1, 0, 0]),  # red
            1.00: np.array([0.5, 0, 0]),  # dark red
        }
        return red_first if mode == 'jet_red2blue' else blue_first

    def _build_lut(self):
        """(Re)build the interpolated lookup table from self.color_map."""
        stop_pos = np.array(sorted(self.color_map.keys()))
        stop_rgb = np.array([self.color_map[p] for p in stop_pos])
        samples = np.linspace(0, 1, self.lut_size)

        table = np.zeros((self.lut_size, 3), dtype=np.float32)
        # Interpolate each channel independently over [0, 1].
        for ch in range(3):
            table[:, ch] = np.interp(samples, stop_pos, stop_rgb[:, ch])
        self.lut = table

    def set_colormap(self, colormap: Dict[float, np.ndarray]):
        """Replace the color map and rebuild the lookup table.

        Args:
            colormap: position -> RGB-triple mapping (positions in [0, 1]).
        """
        self.color_map = colormap
        self._build_lut()

    def colorize(self,
                 depth_image: np.ndarray,
                 depth_range: Optional[Tuple[float, float]] = None,
                 normalize: bool = True) -> np.ndarray:
        """Map a 2-D depth image to a uint8 BGR pseudo-color image.

        Args:
            depth_image: 2-D depth array (8- or 16-bit).
            depth_range: explicit (min, max); when None the range is taken
                from the data (ignoring zeros when normalize is True).
            normalize: whether the automatic range excludes zero pixels.

        Returns:
            uint8 BGR image with the same spatial shape as the input.
        """
        if not isinstance(depth_image, np.ndarray):
            depth_image = np.array(depth_image)

        # Resolve the depth window to stretch across the color map.
        if depth_range is not None:
            lo, hi = depth_range
        elif normalize:
            nonzero = depth_image > 0
            if np.any(nonzero):
                lo = np.min(depth_image[nonzero])
                hi = np.max(depth_image[nonzero])
            else:
                lo, hi = 0, 1  # degenerate: image is all zeros
        else:
            lo = np.min(depth_image)
            hi = np.max(depth_image)

        if hi == lo:  # guard against division by zero
            hi = lo + 1

        # Normalize into [0, 1], then index the LUT.
        scaled = np.clip((depth_image.astype(np.float32) - lo) / (hi - lo), 0, 1)
        indices = (scaled * (self.lut_size - 1)).astype(np.int32)
        rgb = self.lut[indices]

        # RGB -> BGR for OpenCV, quantized to uint8.
        return (rgb[:, :, ::-1] * 255).astype(np.uint8)

    def colorize_with_mask(self, depth_image: np.ndarray,
                           mask: Optional[np.ndarray] = None,
                           depth_range: Optional[Tuple[float, float]] = None,
                           background_color: Tuple[int, int, int] = (0, 0, 0)) -> np.ndarray:
        """Colorize, then paint masked-out pixels with background_color.

        Args:
            depth_image: input depth image.
            mask: validity mask; when None, pixels with depth > 0 are valid.
            depth_range: forwarded to colorize().
            background_color: BGR triple for invalid pixels.
        """
        valid = (depth_image > 0) if mask is None else mask
        painted = self.colorize(depth_image, depth_range, normalize=True)
        painted[~valid] = background_color
        return painted

    def get_colorbar(self, width: int = 50, height: int = 256,
                     vertical: bool = True) -> np.ndarray:
        """Render the color map as a BGR color-bar image.

        Args:
            width: bar width in pixels.
            height: bar height in pixels.
            vertical: orientation; vertical bars run 1 -> 0 top to bottom.
        """
        if vertical:
            ramp = np.tile(np.linspace(1, 0, height).reshape(-1, 1), (1, width))
        else:
            ramp = np.tile(np.linspace(0, 1, width).reshape(1, -1), (height, 1))

        bar_rgb = self.lut[(ramp * (self.lut_size - 1)).astype(np.int32)]
        return (bar_rgb[:, :, ::-1] * 255).astype(np.uint8)

    def adaptive_colorize(self, depth_image: np.ndarray,
                          percentile_range: Tuple[float, float] = (2, 98)) -> np.ndarray:
        """Colorize with a depth window chosen from data percentiles.

        Clipping to percentiles suppresses outliers that would otherwise
        compress the useful part of the color range.
        """
        valid = depth_image > 0
        if not np.any(valid):
            return np.zeros((*depth_image.shape, 3), dtype=np.uint8)

        observed = depth_image[valid]
        lo = np.percentile(observed, percentile_range[0])
        hi = np.percentile(observed, percentile_range[1])
        return self.colorize_with_mask(depth_image, valid, (lo, hi))


class Colorizer(object):
    """Depth colorizer with optional histogram equalization.

    Maps depth values to RGB colors via a precomputed `levels`-entry
    cache interpolated from jet color stops. Note: unlike ColorizerNew,
    the output keeps the color map's RGB channel order (no BGR flip).
    """
    def __init__(self, color_map=None, levels=4096, depth_range=(0, 65535), mode='jet_red2blue'):
        self.color_map = color_map if color_map else self.default_jet_map(mode=mode)
        self.levels = levels
        self.min_dis, self.max_dis = depth_range
        self.cache = self.generate_cache()
        self.logger = init_logger(RMSL_LOG_PATH)

    @staticmethod
    def default_jet_map(mode='jet_red2blue'):
        """Return the built-in jet stops, red-to-blue (default) or blue-to-red."""
        jet_red2blue = {
            0.00: np.array([0.5, 0, 0]),  # dark red
            0.11: np.array([1, 0, 0]),  # red
            0.35: np.array([1, 1, 0]),  # yellow
            0.50: np.array([0, 1, 0]),  # green
            0.64: np.array([0, 1, 1]),  # cyan
            0.86: np.array([0, 0, 1]),  # blue
            1.00: np.array([0, 0, 0.5]),  # dark blue
        }
        jet_blue2red = {
            0.00: np.array([0, 0, 0.5]),  # dark blue
            0.11: np.array([0, 0, 1]),  # blue
            0.35: np.array([0, 1, 1]),  # cyan
            0.50: np.array([0, 1, 0]),  # green
            0.64: np.array([1, 1, 0]),  # yellow
            0.86: np.array([1, 0, 0]),  # red
            1.00: np.array([0.5, 0, 0]),  # dark red
        }
        if mode == 'jet_red2blue':
            return jet_red2blue
        else:
            return jet_blue2red

    def generate_cache(self):
        """Precompute the (levels, 3) color cache.

        Vectorized with np.interp — the same linear interpolation the
        previous per-entry Python loop performed, but done per channel in
        C (the old version was O(levels * stops) in Python).
        """
        positions = np.array(sorted(self.color_map.keys()))
        colors = np.array([self.color_map[pos] for pos in positions])
        ts = np.linspace(0.0, 1.0, self.levels)

        cache = np.zeros((self.levels, 3))
        for channel in range(3):
            cache[:, channel] = np.interp(ts, positions, colors[:, channel])
        return cache

    def _histogram_equalization_lut(self, depth_data, clip_range):
        """Histogram-equalize depths within clip_range into cache indices."""
        depth_data = depth_data.astype(int)

        # Histogram restricted to the clip window.
        hist = np.bincount(depth_data.flatten(), minlength=clip_range[1] + 1)
        hist = hist[clip_range[0]:clip_range[1] + 1]

        # CDF remapped onto [0, levels-1]; empty bins are masked out.
        cdf = hist.cumsum()
        cdf_m = np.ma.masked_equal(cdf, 0)
        cdf_m = (cdf_m - cdf_m.min()) * (self.levels - 1) / (cdf_m.max() - cdf_m.min())
        cdf_final = np.ma.filled(cdf_m, 0).astype(int)

        full_lut = np.zeros(clip_range[1] + 1, dtype=int)
        full_lut[clip_range[0]:clip_range[1] + 1] = cdf_final

        return full_lut[depth_data]

    def apply_color_map(self, indices):
        """Look up cache colors for an index array; output shape is indices.shape + (3,)."""
        return self.cache[indices].reshape(indices.shape + (3,))

    def colorize(self, depth_data, clip_range, equalize: bool = True, background_color=None):
        """Render an integer depth image to a uint8 RGB image.

        Args:
            depth_data: integer depth image; zeros are treated as invalid.
            clip_range: (lo, hi) window depths are clipped to.
            equalize: histogram-equalize within the window instead of a
                plain linear stretch.
            background_color: color for zero-depth pixels on the cache's
                0-1 float scale; defaults to black. (Was a mutable
                np.ndarray default argument — a Python anti-pattern —
                now a None sentinel with identical behavior.)
        """
        if background_color is None:
            background_color = np.array([0, 0, 0])

        mask_zero = (depth_data == 0)
        clipped_depth = np.clip(depth_data, clip_range[0], clip_range[1])

        if equalize:
            indices = self._histogram_equalization_lut(clipped_depth, clip_range)
        else:
            # Linear stretch: (val - lo) / (hi - lo) * (levels - 1)
            val_range = clip_range[1] - clip_range[0]
            if val_range == 0:
                val_range = 1

            normalized = (clipped_depth - clip_range[0]).astype(float) / val_range
            indices = (normalized * (self.levels - 1)).astype(int)

        colorized_image = self.apply_color_map(indices)
        colorized_image[mask_zero] = background_color
        return (colorized_image * 255).astype(np.uint8)


class UtilsBasic(object):
    """Image/depth/point-cloud helper utilities shared by the viewer."""
    def __init__(self):
        # Shared process-wide logger writing to the viewer log file.
        self.logger = init_logger(RMSL_LOG_PATH)

    def read_json(self, json_path: str) -> dict:
        if not os.path.exists(json_path):
            self.logger.log_to_file(f" [Error] config file: {json_path} not exits!")
            raise FileNotFoundError(f" [Error] config file: {json_path} not exits!")
        try:
            with open(json_path, 'r', encoding='utf-8') as file:
                config = json.load(file)
            return config
        except json.JSONDecodeError as e:
            self.logger.log_to_file(f" [Error] Failed load config file: {json_path}, error log: {e}")
            raise KeyError(f" [Error] Failed load config file: {json_path}, error log: {e}")

    def save_path_check(self, save_path: str):
        if not os.path.exists(save_path):
            os.makedirs(save_path)
            self.logger.log_to_file(f" [Info] Success create save directory: {save_path}!")

    @staticmethod
    def get_formatted_timestamp():
        timestamp = time.time()
        dt_object = datetime.fromtimestamp(timestamp)
        formatted_time = dt_object.strftime("%Y%m%d_%H%M%S")

        return formatted_time

    def colorizer_single_camera_depth_image(self, depth_img, max_dis=1000):
        """Quick pseudo-color rendering of a depth image (coarse method).

        Scales depth into 8 bits over max_dis, histogram-equalizes, then
        applies OpenCV's JET colormap. Pixels with depth 0 or below 40
        (presumably millimeters — TODO confirm with callers) are painted
        light gray to mark them invalid.
        """
        min_dis = np.min(depth_img)

        # Map [min_dis, min_dis + max_dis] onto the 8-bit range.
        fac = 255.0 / max_dis
        dis = (depth_img - min_dis) * fac
        dis = cv2.equalizeHist(dis.astype(np.uint8))
        pseudo_img = cv2.applyColorMap(dis, cv2.COLORMAP_JET)
        gray_color = [240, 240, 240]  # BGR format for OpenCV
        # NOTE(review): depth_img == 0 is redundant with depth_img < 40.
        mask = np.logical_or(depth_img == 0, depth_img < 40)
        pseudo_img[mask] = gray_color

        self.logger.log_to_file(f" [Info] Success colorize image(coarse method)!")

        return pseudo_img

    def disp_to_depth_vectorized(self,
                                 disp: np.ndarray,
                                 Q23: float,
                                 Q32: float,
                                 Q33: float = 0.0,
                                 subpixel_value: int = 64,
                                 zoom_ratio: float = 1.0) -> Optional[np.ndarray]:
        if disp is None:
            self.logger.log_to_file(f" [Error] Check disp image, not initial.")
            return None

        if not isinstance(disp, np.ndarray):
            self.logger.log_to_file(f" [Error] Check disp image input, Invalid image.")
            return None

        depth = np.zeros_like(disp, dtype=np.uint16)
        valid_mask = disp != 0

        if np.any(valid_mask):
            disp_float = disp.astype(np.float32)
            denominator = Q32 * disp_float + Q33 * subpixel_value
            valid_denominator = (denominator != 0) & valid_mask

            if np.any(valid_denominator):
                depth_float = np.zeros_like(disp_float)
                depth_float[valid_denominator] = (Q23 * zoom_ratio * subpixel_value /
                                                  denominator[valid_denominator])

                # 四舍五入并转换为uint16，同时处理溢出
                depth_float = np.clip(depth_float, 0, 65535)
                depth = np.round(depth_float).astype(np.uint16)
                depth[~valid_mask] = 0

        return depth

    def generate_pointcloud_by_depth(self,
                                     depth_img,
                                     fx, fy, cx, cy,
                                     img_h, img_w,
                                     min_dis, max_dis,
                                     en_valid_roi=False,
                                     en_cen_roi=False):
        valid_mask = (depth_img >= min_dis) & (depth_img <= max_dis)

        zero_val_cnt = np.sum(depth_img == 0)

        if en_valid_roi:
            if en_cen_roi:
                margin_w = int(0.25 * img_w)
                margin_h = int(0.25 * img_h)
            else:
                margin_w = int(0.05 * img_w)
                margin_h = int(0.05 * img_h)

            start_u, end_u = margin_w, img_w - margin_w
            start_v, end_v = margin_h, img_h - margin_h

            u, v = np.meshgrid(np.arange(start_u, end_u), np.arange(start_v, end_v))

            x_data = (u - cx) * depth_img[start_v: end_v, start_u: end_u] / fx
            y_data = - (v - cy) * depth_img[start_v: end_v, start_u: end_u] / fy
            z_data = - depth_img[start_v: end_v, start_u: end_u]

            valid_x_data = x_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_y_data = y_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_z_data = z_data[valid_mask[start_v:end_v, start_u:end_u]]

            point_cloud_show = np.column_stack((valid_x_data, valid_y_data, valid_z_data))
            point_cloud_save = np.column_stack((valid_x_data, - valid_y_data, - valid_z_data))
        else:
            """ Global point cloud """
            u, v = np.meshgrid(np.arange(0, img_w), np.arange(0, img_h))

            x_data = (u - cx) * depth_img / fx
            y_data = - (v - cy) * depth_img / fy
            z_data = - depth_img

            valid_x_data = x_data[valid_mask]
            valid_y_data = y_data[valid_mask]
            valid_z_data = z_data[valid_mask]

            point_cloud_show = np.column_stack((valid_x_data, valid_y_data, valid_z_data))
            point_cloud_save = np.column_stack((valid_x_data, - valid_y_data, - valid_z_data))

        self.logger.log_to_file(f" [Info] Success generate pointcloud, zero points count: {zero_val_cnt}")

        return point_cloud_show, point_cloud_save, zero_val_cnt

    @timeit
    def generate_rgb_pointcloud(self, rgb, depth, rgbCamParam: dict, depth_ratio: float = 1.0, dis_near: int = 100,
                                dis_far: int = 3000):
        """Back-project a depth image into an Nx6 (x, y, z, r, g, b) cloud.

        Args:
            rgb: color image; assumed HxWx3 matching depth's HxW — TODO confirm.
            depth: HxW depth image.
            rgbCamParam: intrinsics dict with keys 'fx', 'fy', 'cx', 'cy'.
            depth_ratio: scale between stored depth units and output units.
            dis_near / dis_far: accepted depth window, scaled by depth_ratio
                before filtering.

        Returns:
            float32 array of shape (N, 6).
        """
        fx = rgbCamParam['fx']
        fy = rgbCamParam['fy']
        cx = rgbCamParam['cx']
        cy = rgbCamParam['cy']

        # Bring the depth window into the stored-depth units.
        dis_near *= depth_ratio
        dis_far *= depth_ratio

        height, width = depth.shape
        ratio = 1 / depth_ratio

        # Create meshgrid for pixel coordinates
        x = np.arange(width)
        y = np.arange(height)
        xv, yv = np.meshgrid(x, y)

        # Flatten the arrays for easier processing
        xv = xv.flatten()
        yv = yv.flatten()
        depth_flat = depth.flatten()

        # Filter points based on depth range
        valid = (depth_flat >= dis_near) & (depth_flat <= dis_far)
        xv = xv[valid]
        yv = yv[valid]
        depth_flat = depth_flat[valid]

        # Pinhole back-projection of the surviving pixels.
        z = depth_flat * ratio
        x = (xv - cx) * z / fx
        y = (yv - cy) * z / fy

        # Correct RGB reading order
        rgb_flat = rgb.reshape(-1, 3)
        rgb_flat = rgb_flat[valid]

        # Create the point cloud array
        points = np.zeros((len(z), 6), dtype=np.float32)
        points[:, 0] = x
        points[:, 1] = y
        points[:, 2] = z
        points[:, 3] = rgb_flat[:, 0]  # r
        points[:, 4] = rgb_flat[:, 1]  # g
        points[:, 5] = rgb_flat[:, 2]  # b

        self.logger.log_to_file(f" [Info] Success generate rgb pointcloud.")

        return points

    @timeit
    def generate_rgb_pointcloud_optimized(self, rgb, depth, rgbCamParam: dict, depth_ratio: float = 1.0,
                                          dis_near: int = 100, dis_far: int = 3000):
        """Vectorized variant of generate_rgb_pointcloud (same Nx6 output).

        NOTE(review): the original author's comment marked this an
        "ineffective optimization" — it reportedly did not beat the
        flatten-based version in practice.
        """
        fx = rgbCamParam['fx']
        fy = rgbCamParam['fy']
        cx = rgbCamParam['cx']
        cy = rgbCamParam['cy']

        dis_near *= depth_ratio
        dis_far *= depth_ratio
        height, width = depth.shape

        # 1. Boolean mask computed directly on the 2-D depth map.
        valid_mask = (depth >= dis_near) & (depth <= dis_far)

        # 2. Coordinate grids; np.indices builds both arrays in one call.
        yv, xv = np.indices((height, width))

        # 3. Back-project only the points selected by the mask.
        z = depth[valid_mask] / depth_ratio
        x = (xv[valid_mask] - cx) * z / fx
        y = (yv[valid_mask] - cy) * z / fy

        # 4. Matching colors; boolean indexing flattens in the same row-major order.
        colors = rgb[valid_mask]

        # 5. Stack the XYZ and RGB columns into the final Nx6 array.
        points = np.column_stack((x, y, z, colors))

        # self.logger.log_to_file(f" [Info] Success generate rgb pointcloud with {len(points)} points.")
        return points.astype(np.float32)

    def save_ply_ascii_vectorized(self, point_cloud_data, filename):
        n_points = len(point_cloud_data)

        # PLY文件头
        header = f"""ply
    format ascii 1.0
    element vertex {n_points}
    property float x
    property float y
    property float z
    end_header
    """

        # 使用列表推导式和join，比循环快很多
        data_lines = [f"{point[0]:.6f} {point[1]:.6f} {point[2]:.6f}" for point in point_cloud_data]
        data_content = '\n'.join(data_lines)

        with open(filename, 'w') as f:
            f.write(header + data_content)

        self.logger.log_to_file(f" [Info] Success save ascii vectorized pointcloud")

    def save_ply_ascii_rgb_vectorized(self, point_cloud_data, filename):
        """
        保存带RGB颜色的点云到ASCII格式的PLY文件。

        参数:
        point_cloud_data (np.ndarray): Nx6 的NumPy数组 (x, y, z, r, g, b)。
                                       其中RGB颜色值应在 [0, 255] 范围内。
        filename (str): 输出文件名。
        """
        if point_cloud_data.shape[1] != 6:
            raise ValueError("输入数据必须是 Nx6 维度的 (x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        # PLY文件头，保持不变
        header = f"""ply
        format ascii 1.0
        element vertex {n_points}
        property float x
        property float y
        property float z
        property uchar red
        property uchar green
        property uchar blue
        end_header
        """
        # 也可以使用NumPy的savetxt函数，它能更高效、更安全地处理格式化和写入
        # 注意：savetxt需要将数据转换为整数类型以匹配 'uchar' 的格式
        # 我们创建一个临时数组来做这件事

        data_to_save = np.copy(point_cloud_data)
        # 将颜色列转换为整数类型
        data_to_save[:, 3:6] = data_to_save[:, 3:6].astype(np.uint8)

        # 使用 np.savetxt 进行保存
        # fmt 参数为每一列指定格式
        # header 参数直接写入文件头
        # comments='' 避免在文件头前添加'#'注释符
        np.savetxt(filename,
                   data_to_save,
                   fmt=['%.6f', '%.6f', '%.6f', '%d', '%d', '%d'],
                   header=header.strip(),  # strip() 移除 header 可能有的多余空白
                   comments='',
                   newline='\n')

        self.logger.log_to_file(f" [Info] Success save ascii vectorized rgb-pointcloud")

    @timeit
    def save_ply_ascii_rgb_chunked(self, point_cloud_data, filename, chunk_size=10000):
        """Save an Nx6 (x, y, z, r, g, b) cloud as ASCII PLY in chunks.

        Chunking bounds peak memory when writing very large clouds.

        BUGFIX: the previous triple-quoted header embedded 8-space source
        indentation into every header line, producing an invalid PLY
        header; it is now built flush-left.

        Raises:
            ValueError: if the array is not Nx6.
        """
        if point_cloud_data.shape[1] != 6:
            raise ValueError("输入数据必须是 Nx6 维度的 (x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        header = (
            "ply\n"
            "format ascii 1.0\n"
            f"element vertex {n_points}\n"
            "property float x\n"
            "property float y\n"
            "property float z\n"
            "property uchar red\n"
            "property uchar green\n"
            "property uchar blue\n"
            "end_header\n"
        )

        with open(filename, 'w') as f:
            f.write(header)

            # Write the body one chunk at a time.
            for i in range(0, n_points, chunk_size):
                end_idx = min(i + chunk_size, n_points)
                chunk = point_cloud_data[i:end_idx]

                xyz = chunk[:, :3]
                rgb = chunk[:, 3:6].astype(np.uint8)

                lines = [f"{x:.6f} {y:.6f} {z:.6f} {r} {g} {b}\n"
                         for (x, y, z), (r, g, b) in zip(xyz, rgb)]
                f.writelines(lines)

    def save_ply_ascii_rgb_numpy_optimized(self, point_cloud_data, filename):
        """Save an Nx6 (x, y, z, r, g, b) cloud as ASCII PLY via one np.savetxt call.

        Args:
            point_cloud_data (np.ndarray): Nx6 array (x, y, z, r, g, b);
                RGB values expected in [0, 255].
            filename (str): output .ply path.

        Raises:
            ValueError: if the input is not Nx6.
        """
        if point_cloud_data.shape[1] != 6:
            raise ValueError("输入数据必须是 Nx6 维度的 (x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        # BUG FIX: the previous triple-quoted header kept the source indentation,
        # which produced an invalid PLY header.  Build it from clean lines.
        header = "\n".join([
            "ply",
            "format ascii 1.0",
            f"element vertex {n_points}",
            "property float x",
            "property float y",
            "property float z",
            "property uchar red",
            "property uchar green",
            "property uchar blue",
            "end_header",
        ]) + "\n"

        # Work on a copy so the caller's array is left untouched.
        data_to_save = point_cloud_data.copy()
        data_to_save[:, 3:6] = data_to_save[:, 3:6].astype(np.uint8)

        with open(filename, 'w') as f:
            f.write(header)
            # A single row-format string is faster than a per-column fmt list.
            np.savetxt(f,
                       data_to_save,
                       fmt='%.6f %.6f %.6f %d %d %d')

        self.logger.log_to_file(f" [Info] Success save ascii vectorized rgb-pointcloud")

    def save_ply_ascii_fast(self, point_cloud_data, filename):
        """Write an Nx3 point cloud as an ASCII PLY file using np.array2string."""
        n_points = len(point_cloud_data)

        # Render every row in one call; the bracket characters are stripped below.
        rendered = np.array2string(point_cloud_data,
                                   separator=' ',
                                   formatter={'float_kind': lambda v: f"{v:.6f}"},
                                   threshold=np.inf,
                                   max_line_width=np.inf)
        rendered = rendered.replace('[', '').replace(']', '')

        header = ("ply\n"
                  "format ascii 1.0\n"
                  f"element vertex {n_points}\n"
                  "property float x\n"
                  "property float y\n"
                  "property float z\n"
                  "end_header\n")

        with open(filename, 'w') as out:
            out.write(header)
            # One stripped line per vertex; empty lines are dropped.
            out.writelines(row.strip() + '\n'
                           for row in rendered.split('\n') if row.strip())

        self.logger.log_to_file(f" [Info] Success save pointcloud(ascii fast)")

    def save_ply_binary_fast(self, point_cloud_data, filename):
        """Write an Nx3 point cloud as binary little-endian PLY (float32 xyz)."""
        n_points = len(point_cloud_data)

        header = (f"ply\nformat binary_little_endian 1.0\nelement vertex {n_points}\n"
                  "property float x\nproperty float y\nproperty float z\nend_header\n")

        # Cast lazily; force a copy only when the layout is not C-contiguous.
        payload = point_cloud_data.astype(np.float32, copy=False)
        if not payload.flags.c_contiguous:
            payload = np.ascontiguousarray(payload)

        with open(filename, 'wb') as out:
            out.write(header.encode('ascii'))
            out.write(payload.tobytes())

        self.logger.log_to_file(f" [Info] Success save pointcloud(binary fast)")

    def save_ply_binary_rgb_fast(self, point_cloud_data, filename):
        """Efficiently save an RGB point cloud as a binary (little-endian) PLY file.

        Args:
            point_cloud_data (np.ndarray): Nx6 array (x, y, z, r, g, b),
                with RGB values in the [0, 255] range.
            filename (str): output file name.

        Raises:
            ValueError: if the input is not Nx6.
        """
        if point_cloud_data.shape[1] != 6:
            raise ValueError("输入数据必须是 Nx6 维度的 (x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        with open(filename, 'wb') as f:
            # Header built from explicit lines so every newline is unambiguous.
            header_lines = [
                "ply",
                "format binary_little_endian 1.0",
                f"element vertex {n_points}",
                "property float x",
                "property float y",
                "property float z",
                "property uchar red",
                "property uchar green",
                "property uchar blue",
                "end_header",
            ]
            header = "\n".join(header_lines) + "\n"
            f.write(header.encode('ascii'))

            # Packed (f4, f4, f4, u1, u1, u1) records: without align=True a
            # structured dtype is tightly packed, matching the 15-byte vertex
            # layout declared in the header.
            vertices = np.zeros(n_points, dtype=[
                ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                ('r', 'u1'), ('g', 'u1'), ('b', 'u1')
            ])

            xyz = point_cloud_data[:, :3].astype(np.float32)

            # Colors are documented as already being 0-255; scale upstream if
            # your source data is normalized to [0, 1].
            rgb = point_cloud_data[:, 3:6].astype(np.uint8)

            vertices['x'] = xyz[:, 0]
            vertices['y'] = xyz[:, 1]
            vertices['z'] = xyz[:, 2]
            # BUG FIX: the channels were previously written in BGR order
            # (red <- column 2, blue <- column 0), swapping red and blue in the
            # output.  Write them in the documented RGB order, consistent with
            # save_ply_binary_rgb_corrected.
            vertices['r'] = rgb[:, 0]
            vertices['g'] = rgb[:, 1]
            vertices['b'] = rgb[:, 2]

            # tobytes() on the packed structured array yields the binary body.
            f.write(vertices.tobytes())

        self.logger.log_to_file(f" [Info] Success save rgb-pointcloud(binary fast)")

    def save_ply_binary_rgb_corrected(self, point_cloud_data, filename):
        """Write an Nx6 (x, y, z, r, g, b) cloud as binary little-endian PLY.

        Colors whose maximum is <= 1.0 are treated as normalized and rescaled
        to the 0-255 range before writing.

        Raises:
            ValueError: if the input is not Nx6.
        """
        if point_cloud_data.shape[1] != 6:
            raise ValueError("输入数据必须是 Nx6 维度的 (x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        header = "\n".join([
            "ply",
            "format binary_little_endian 1.0",
            f"element vertex {n_points}",
            "property float x",
            "property float y",
            "property float z",
            "property uchar red",
            "property uchar green",
            "property uchar blue",
            "end_header",
        ]) + "\n"

        # Packed (f4, f4, f4, u1, u1, u1) records match the header layout.
        vertices = np.zeros(n_points, dtype=[
            ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
            ('r', 'u1'), ('g', 'u1'), ('b', 'u1')
        ])

        xyz = point_cloud_data[:, :3].astype(np.float32)
        vertices['x'], vertices['y'], vertices['z'] = xyz[:, 0], xyz[:, 1], xyz[:, 2]

        colors = point_cloud_data[:, 3:6]
        # Heuristic: values within [0, 1] are assumed normalized.
        if colors.max() <= 1.0:
            colors = (colors * 255).astype(np.uint8)
        else:
            colors = colors.astype(np.uint8)

        # Columns are already ordered R, G, B — keep that mapping.
        vertices['r'], vertices['g'], vertices['b'] = colors[:, 0], colors[:, 1], colors[:, 2]

        with open(filename, 'wb') as out:
            out.write(header.encode('ascii'))
            out.write(vertices.tobytes())

        self.logger.log_to_file(f" [Info] Success save rgb-pointcloud(binary fast)")

    def save_pointcloud_fast(self, point_cloud_save, filename="pointcloud.ply"):
        """Dispatch to binary PLY for big clouds (> 50k points), ASCII otherwise."""
        writer = (self.save_ply_binary_fast
                  if len(point_cloud_save) > 50000
                  else self.save_ply_ascii_vectorized)
        writer(point_cloud_save, filename)

    def save_rgb_pointcloud_fast(self, point_cloud_save, filename="rgb_pointcloud.ply"):
        """Dispatch RGB clouds: binary PLY above 50k points, chunked ASCII below."""
        writer = (self.save_ply_binary_rgb_corrected
                  if len(point_cloud_save) > 50000
                  else self.save_ply_ascii_rgb_chunked)
        writer(point_cloud_save, filename)

    def is_file_on_device(self, device_path: str, device_serial: Optional[str] = None) -> bool:
        """Check via ADB whether a file or directory exists on the Android device.

        The shell command appends ``; echo $?`` so the exit code of ``test -e``
        is captured from stdout: the adb process's own return code only tells
        us whether adb itself ran (device missing, adb failure, ...), not the
        result of the on-device command.

        Args:
            device_path: absolute path on the device (e.g. "/sdcard/Download/f.txt").
            device_serial: optional serial number to target a specific device.

        Returns:
            True if the path exists on the device, False otherwise.
        """
        shell_command = f'test -e "{device_path}"; echo $?'

        # CONSISTENCY FIX: use the bundled ADB executable (like the other ADB
        # helpers in this class) instead of relying on "adb" being on PATH.
        adb_command = [ADB]
        if device_serial:
            adb_command.extend(['-s', device_serial])
        adb_command.extend(['shell', shell_command])

        try:
            result = subprocess.run(
                adb_command,
                capture_output=True,  # stdout carries the inner exit code
                text=True,
                check=False  # we parse the inner exit code ourselves
            )

            # adb itself failed (device not connected, adb error, ...).
            if result.returncode != 0:
                self.logger.log_to_file(f" [Error] Failed operate ADB cmd. Return Code: {result.returncode}")
                if result.stderr:
                    self.logger.log_to_file(f" [Error] ADB Error info: {result.stderr.strip()}")
                return False

            # stdout holds the exit code of `test -e`: "0" means the path exists.
            try:
                inner_exit_code = int(result.stdout.strip())
                return inner_exit_code == 0
            except (ValueError, IndexError):
                # Unexpected output: the exit code could not be parsed — log it
                # instead of failing silently.
                self.logger.log_to_file(
                    f" [Error] Unparsable ADB output: stdout='{result.stdout.strip()}'"
                    f" stderr='{result.stderr.strip()}'")
                return False

        except FileNotFoundError:
            # The adb executable itself is missing.
            self.logger.log_to_file(" [Error] 'adb' executable not found; check the bundled ADB path.")
            return False
        except Exception as e:
            self.logger.log_to_file(f" [Error] Unexpected error while running adb: {e}")
            return False

    def pull_file_from_single_device(self, calib_yaml_evb_path: str, calib_yaml_path: str) -> bool:
        """Wait for an ADB device, then pull the calibration file from it.

        Args:
            calib_yaml_evb_path: source path on the device.
            calib_yaml_path: local destination (a stale copy is removed first).

        Returns:
            True on success, False on any ADB/IO failure.
        """
        self.logger.log_to_file(" [Info] --- Starting device wait and file pull process ---")
        # Remove any stale local copy so a failed pull cannot leave old data behind.
        if os.path.exists(calib_yaml_path):
            self.logger.log_to_file(f" [Warn] Delete cache file: {calib_yaml_path}")
            os.remove(calib_yaml_path)

        try:
            self.logger.log_to_file(" [Info] ⏳ Waiting for a device to be connected and ready...")
            self.logger.log_to_file(" [Info] 🚨 Please connect your device and enable USB debugging now")

            # check=True raises CalledProcessError if adb itself fails.
            subprocess.run(
                [ADB, 'wait-for-device'],
                check=True,
                capture_output=True  # suppress this command's output
            )

            self.logger.log_to_file(" [Info] ✅ Device detected and ready!")

            # Device is ready: pull the file (raises CalledProcessError on failure;
            # the result object itself is not needed).
            subprocess.run(
                [ADB, 'pull', calib_yaml_evb_path, calib_yaml_path],
                capture_output=True,
                text=True,
                check=True
            )

            self.logger.log_to_file(" [Info] 🎉 Success! File has been pulled successfully.")
            return True
        except FileNotFoundError:
            self.logger.log_to_file(" [Error] ❌ Critical Error: 'adb' command not found.")
            self.logger.log_to_file(" [Error]    Please ensure ADB is installed and in your system's PATH.")
            return False
        except subprocess.CalledProcessError as e:
            # stderr is bytes for the wait-for-device run (no text=True) and str
            # for the pull run — normalize before logging.
            stderr = e.stderr
            if isinstance(stderr, bytes):
                stderr = stderr.decode(errors='replace')
            error_message = (stderr or '').strip()
            if "wait-for-device" in str(e.args):
                self.logger.log_to_file(f" [Error] 🚨 An error occurred while waiting for the device: {error_message}")
            else:  # the failure came from adb pull
                self.logger.log_to_file(f" [Error] 🚨 Error during 'adb pull': {error_message}")
                self.logger.log_to_file("  [Error]   Please check the following:")
                self.logger.log_to_file(f" [Error]   1. Is the remote file path '{calib_yaml_evb_path}' correct?")
                self.logger.log_to_file(" [Error]   2. Do you have read permissions for that file on the device?")
            return False
        except Exception as e:
            self.logger.log_to_file(f" [Error] An unexpected error occurred: {e}")
            return False


class CalibrationReader:
    """Context-manager wrapper around cv2.FileStorage for calibration YAML files."""

    def __init__(self, yaml_path):
        """Open the YAML file for reading; raise ValueError if it cannot be opened."""
        self.fs = cv2.FileStorage(yaml_path, cv2.FILE_STORAGE_READ)
        if not self.fs.isOpened():
            raise ValueError(f"Can not open calib file: {yaml_path}")

    def read_scalar(self, key, dtype=int, default=None):
        """Read a scalar node as int/float/str; return `default` when absent."""
        node = self.fs.getNode(key)
        if node.empty():
            return default
        if dtype == int:
            return int(node.real())
        if dtype == float:
            return node.real()
        return node.string()

    def read_matrix(self, key, default=None):
        """Read a matrix node; return `default` when absent."""
        node = self.fs.getNode(key)
        return default if node.empty() else node.mat()

    def read_calibration(self, required_keys=None, optional_keys=None):
        """Collect required and optional calibration entries into one dict.

        Missing required keys raise ValueError; optional keys fall back to
        their (dtype, default) pairs, with dtype=None meaning "matrix node".
        """
        # Keys that hold matrices rather than scalars.
        matrix_keys = {"K_l", "K_r", "D_l", "D_r", "R", "T", "Q", "R1", "R2", "P1", "P2"}

        if required_keys is None:
            required_keys = ["l_width", "l_height", "K_l", "D_l", "Q"]

        if optional_keys is None:
            optional_keys = {
                "r_width": (int, 640),
                "r_height": (int, 352),
                "rgb_width": (int, 1280),
                "rgb_height": (int, 1080),
                "R1": (None, None),
                "R2": (None, None),
                "P1": (None, None),
                "P2": (None, None),
                "K_rgb": (None, None),
                "D_rgb": (None, None),
                "R_ir2rgb": (None, None),
                "T_ir2rgb": (None, None),
                "invPR": (None, None)
            }

        intrinsic_info = {}
        for key in required_keys:
            value = self.read_matrix(key) if key in matrix_keys else self.read_scalar(key)
            if value is None:
                raise ValueError(f"Core key: '{key}' not exists!")
            intrinsic_info[key] = value

        for key, (dtype, default) in optional_keys.items():
            if dtype is None:
                intrinsic_info[key] = self.read_matrix(key, default)
            else:
                intrinsic_info[key] = self.read_scalar(key, dtype, default)

        return intrinsic_info

    def close(self):
        """Release the underlying FileStorage handle (safe to call twice)."""
        if self.fs.isOpened():
            self.fs.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()


class ParserCamIntrinsic(object):
    """Load the camera calibration YAML and flatten it into a plain dict."""

    def __init__(self, intrinsic_yaml_path: str = './config/calibration.yaml'):
        """Set up file logging and validate that the calibration YAML exists.

        Raises:
            FileNotFoundError: if `intrinsic_yaml_path` does not exist.
        """
        self.logger = init_logger(RMSL_LOG_PATH)
        self.intrinsic_yaml_path = intrinsic_yaml_path
        if not os.path.exists(intrinsic_yaml_path):
            self.logger.log_to_file(f" [Error] Check camera intrinsic yaml path: {intrinsic_yaml_path}")
            raise FileNotFoundError(f" [Error] Check camera intrinsic yaml path: {intrinsic_yaml_path}")

    def get_all_camera_intrinsic(self):
        """Read the calibration file and assemble a flat calibration dict.

        Returns:
            dict with left-camera intrinsics/distortion, rectification (R1/P1),
            the Q-matrix terms q23/q32/q33, and — when K_rgb is present (RGBD
            module) — RGB intrinsics plus the depth->RGB projection
            coefficients 'r_coeff' (rotation part) and 't_coeff' (translation
            part); otherwise the RGB-related entries are None ('2rgb' module).
        """
        calib_info = dict()

        with CalibrationReader(self.intrinsic_yaml_path) as reader:
            intrinsic_info = reader.read_calibration()

            calib_info['l_width'] = intrinsic_info['l_width']
            calib_info['l_height'] = intrinsic_info['l_height']

            calib_info['l_intrinsic'] = {
                'fx': float(intrinsic_info['K_l'][0, 0]),
                'fy': float(intrinsic_info['K_l'][1, 1]),
                'cx': float(intrinsic_info['K_l'][0, 2]),
                'cy': float(intrinsic_info['K_l'][1, 2]),
                'K_l': intrinsic_info['K_l']
            }

            D_l_flat = intrinsic_info['D_l'].flatten()
            calib_info['l_distortion'] = {
                'k1': float(D_l_flat[0]), 'k2': float(D_l_flat[1]),
                'p1': float(D_l_flat[2]), 'p2': float(D_l_flat[3]),
                'k3': float(D_l_flat[4]), 'k4': float(D_l_flat[5]),
                'k5': float(D_l_flat[6]), 'k6': float(D_l_flat[7]),
                's1': float(D_l_flat[8]), 's2': float(D_l_flat[9]),
                's3': float(D_l_flat[10]), 's4': float(D_l_flat[11]),
                'D_l': intrinsic_info['D_l']
            }

            calib_info['R1'] = intrinsic_info['R1']
            calib_info['P1'] = intrinsic_info['P1']

            # Q-matrix entries used for disparity -> depth conversion.
            calib_info['q23'] = intrinsic_info['Q'][2, 3]
            calib_info['q32'] = intrinsic_info['Q'][3, 2]
            calib_info['q33'] = intrinsic_info['Q'][3, 3]

            if intrinsic_info['K_rgb'] is not None:
                calib_info['rgb_intrinsic'] = {
                    'fx': float(intrinsic_info['K_rgb'][0, 0]),
                    'fy': float(intrinsic_info['K_rgb'][1, 1]),
                    'cx': float(intrinsic_info['K_rgb'][0, 2]),
                    'cy': float(intrinsic_info['K_rgb'][1, 2]),
                    'K': intrinsic_info['K_rgb']
                }

                D_rgb_flat = intrinsic_info['D_rgb'].flatten()
                calib_info['rgb_distortion'] = {
                    'k1': float(D_rgb_flat[0]), 'k2': float(D_rgb_flat[1]),
                    'p1': float(D_rgb_flat[2]), 'p2': float(D_rgb_flat[3]),
                    'k3': float(D_rgb_flat[4]), 'k4': float(D_rgb_flat[5]),
                    'k5': float(D_rgb_flat[6]), 'k6': float(D_rgb_flat[7]),
                    's1': float(D_rgb_flat[8]), 's2': float(D_rgb_flat[9]),
                    's3': float(D_rgb_flat[10]), 's4': float(D_rgb_flat[11]),
                    'D': intrinsic_info['D_rgb']
                }

                # Rotation part of the depth->RGB projection: K_rgb @ R @ invPR.
                calib_info['r_coeff'] = {}
                R_proj = intrinsic_info['K_rgb'] @ intrinsic_info['R_ir2rgb'] @ intrinsic_info['invPR']
                calib_info['r_coeff']['r0'] = float(R_proj[0, 0])
                calib_info['r_coeff']['r1'] = float(R_proj[0, 1])
                calib_info['r_coeff']['r2'] = float(R_proj[0, 2])
                calib_info['r_coeff']['r3'] = float(R_proj[1, 0])
                calib_info['r_coeff']['r4'] = float(R_proj[1, 1])
                calib_info['r_coeff']['r5'] = float(R_proj[1, 2])
                calib_info['r_coeff']['r6'] = float(R_proj[2, 0])
                calib_info['r_coeff']['r7'] = float(R_proj[2, 1])
                calib_info['r_coeff']['r8'] = float(R_proj[2, 2])

                # Translation part of the depth->RGB projection: K_rgb @ T.
                # BUG FIX: this dict used to be created on intrinsic_info, so
                # the assignments below raised KeyError on calib_info['t_coeff'].
                calib_info['t_coeff'] = {}
                T_proj = intrinsic_info['K_rgb'] @ intrinsic_info['T_ir2rgb']
                calib_info['t_coeff']['t0'] = float(T_proj[0, 0])
                calib_info['t_coeff']['t1'] = float(T_proj[1, 0])
                calib_info['t_coeff']['t2'] = float(T_proj[2, 0])
                calib_info['category'] = 'rgbd'  # RGBD module
            else:
                calib_info['rgb_width'] = intrinsic_info['rgb_width']
                calib_info['rgb_height'] = intrinsic_info['rgb_height']
                calib_info['rgb_intrinsic'] = None
                calib_info['rgb_distortion'] = None
                calib_info['r_coeff'] = None
                calib_info['t_coeff'] = None
                calib_info['category'] = '2rgb'  # dual-RGB module

        return calib_info


class RMSLAlign(object):
    def __init__(self,
                 src_w: int, src_h: int,
                 dst_w: int, dst_h: int,
                 extend_w_pixels=256,
                 extend_h_pixels=256,
                 rotate_angle=0):
        self.src_w = src_w
        self.src_h = src_h
        self.dst_w = dst_w
        self.dst_h = dst_h
        self.rotate_angle = rotate_angle
        self.extend_w_pixel = extend_w_pixels
        self.extend_h_pixel = extend_h_pixels

        self.extend_buf_w = dst_w + 2 * self.extend_w_pixel
        self.extend_buf_h = dst_h + 2 * self.extend_h_pixel

        self.lut = np.zeros((self.src_h, self.src_w, 3), dtype=np.float32)

        self.ir_extend = np.zeros((self.extend_buf_h, self.extend_buf_w), dtype=np.uint8)
        self.dp_extend = np.zeros((self.extend_buf_h, self.extend_buf_w), dtype=np.uint16)

    def create_lut(self, r_coeff: dict):
        """Fill the per-pixel LUT with the rotation/projection coefficients.

        Each source pixel (col, row) gets the three linear forms
        x = r0*col + r1*row + r2, y = r3*col + r4*row + r5,
        z = r6*col + r7*row + r8 stored in lut[..., 0..2].

        Args:
            r_coeff: dict with keys 'r0'..'r8'; None or an empty dict leaves
                the LUT untouched.
        """
        # BUG FIX: check None before calling len(); the previous order
        # (len(r_coeff) == 0 or r_coeff is None) raised TypeError on None.
        if r_coeff is None or len(r_coeff) == 0:
            return

        cols, rows = np.meshgrid(np.arange(self.src_w), np.arange(self.src_h))
        x = r_coeff['r0'] * cols + r_coeff['r1'] * rows + r_coeff['r2']
        y = r_coeff['r3'] * cols + r_coeff['r4'] * rows + r_coeff['r5']
        z = r_coeff['r6'] * cols + r_coeff['r7'] * rows + r_coeff['r8']

        self.lut[:, :, 0] = x
        self.lut[:, :, 1] = y
        self.lut[:, :, 2] = z

    def align_dp_to_rgb(self, dp_img, t_coeff: dict):
        """Reproject the depth image into the RGB frame, one pixel at a time.

        For every non-zero depth pixel the precomputed LUT row supplies the
        linear projection terms and `t_coeff` ('t0'..'t2') the translation
        terms.  Each projected pixel is splatted as a 2x2 block into the
        padded `dp_extend` buffer, and the un-padded window is returned.

        Args:
            dp_img: depth image of shape (src_h, src_w); zero means "no depth".
            t_coeff: dict with translation coefficients 't0', 't1', 't2'.

        Returns:
            A (dst_h, dst_w) view into `dp_extend` with the aligned depth.
        """
        self.dp_extend.fill(0)

        for row in range(self.src_h):
            for col in range(self.src_w):
                dp_val = dp_img[row, col]
                x, y, z = self.lut[row, col]

                if dp_val > 0:
                    # Project into the RGB camera: uv = dp * lut + t.
                    uv_color = [dp_val * x + t_coeff['t0'],
                                dp_val * y + t_coeff['t1'],
                                dp_val * z + t_coeff['t2']]

                    # Avoid division by zero or very small numbers
                    if uv_color[2] < 1e-6:
                        continue

                    inv_z = 1.0 / uv_color[2]

                    # Perspective division with +0.5 rounding.
                    new_x = int(uv_color[0] * inv_z + 0.5)
                    new_y = int(uv_color[1] * inv_z + 0.5)

                    # Map the pixel into the rotated destination frame.
                    if self.rotate_angle == 0:
                        pass
                    elif self.rotate_angle == 90:
                        new_x, new_y = new_y, new_x
                        new_y = self.dst_h - 1 - new_y
                    elif self.rotate_angle == 180:
                        new_x = self.dst_w - 1 - new_x
                        new_y = self.dst_h - 1 - new_y
                    elif self.rotate_angle == 270:
                        new_x, new_y = new_y, new_x
                        new_x = self.dst_w - 1 - new_x

                    # Add a bounds check to ensure the new coordinate is valid
                    if (0 <= new_x < self.dst_w) and (0 <= new_y < self.dst_h):
                        # Get the final value to write
                        pixel_value = int(uv_color[2])

                        # Calculate the top-left corner in the extended buffer
                        final_x = self.extend_w_pixel + new_x
                        final_y = self.extend_h_pixel + new_y

                        # Splat a 2x2 block; the offset direction follows the
                        # rotation angle.
                        if self.rotate_angle == 0:
                            self.dp_extend[final_y, final_x] = pixel_value
                            self.dp_extend[final_y, final_x + 1] = pixel_value
                            self.dp_extend[final_y + 1, final_x] = pixel_value
                            self.dp_extend[final_y + 1, final_x + 1] = pixel_value
                        elif self.rotate_angle == 90:
                            self.dp_extend[final_y, final_x] = pixel_value
                            self.dp_extend[final_y, final_x + 1] = pixel_value
                            self.dp_extend[final_y - 1, final_x] = pixel_value
                            self.dp_extend[final_y - 1, final_x + 1] = pixel_value
                        elif self.rotate_angle == 180:
                            self.dp_extend[final_y, final_x] = pixel_value
                            self.dp_extend[final_y, final_x - 1] = pixel_value
                            self.dp_extend[final_y - 1, final_x] = pixel_value
                            self.dp_extend[final_y - 1, final_x - 1] = pixel_value
                        elif self.rotate_angle == 270:
                            self.dp_extend[final_y, final_x] = pixel_value
                            self.dp_extend[final_y, final_x - 1] = pixel_value
                            self.dp_extend[final_y + 1, final_x] = pixel_value
                            self.dp_extend[final_y + 1, final_x - 1] = pixel_value
                        else:
                            pass

                else:
                    continue

        # Crop the padding off to return the (dst_h, dst_w) result.
        align_dp_img = self.dp_extend[self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                       self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel]

        return align_dp_img

    def align_dp_to_rgb_vectorized(self, dp_img, t_coeff: dict):
        """Fully vectorized variant of align_dp_to_rgb.

        Same contract and output as align_dp_to_rgb, but projection, rotation,
        bounds checking and the 2x2 splat are all NumPy array operations.

        Args:
            dp_img: depth image of shape (src_h, src_w); zero pixels are skipped.
            t_coeff: dict with translation coefficients 't0', 't1', 't2'.

        Returns:
            A (dst_h, dst_w) view into `dp_extend` with the aligned depth.
        """
        self.dp_extend.fill(0)

        # Mask of valid (non-zero) depth pixels.
        valid_mask = dp_img > 0
        if not np.any(valid_mask):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        # Coordinates, depth values and LUT rows of the valid pixels.
        valid_coords = np.where(valid_mask)
        valid_dp_vals = dp_img[valid_coords]
        valid_lut = self.lut[valid_coords]

        # Vectorized projection: uv = dp * lut + t.
        uv_color = np.column_stack([
            valid_dp_vals * valid_lut[:, 0] + t_coeff['t0'],
            valid_dp_vals * valid_lut[:, 1] + t_coeff['t1'],
            valid_dp_vals * valid_lut[:, 2] + t_coeff['t2']
        ])

        # Drop points whose z is too small (would blow up the division).
        z_valid = uv_color[:, 2] >= 1e-6
        if not np.any(z_valid):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        uv_color = uv_color[z_valid]

        # Perspective division to pixel coordinates (with +0.5 rounding).
        inv_z = 1.0 / uv_color[:, 2]
        new_coords = np.column_stack([
            (uv_color[:, 0] * inv_z + 0.5).astype(np.int32),
            (uv_color[:, 1] * inv_z + 0.5).astype(np.int32)
        ])

        # Map into the rotated destination frame.
        if self.rotate_angle == 90:
            new_coords = np.column_stack([new_coords[:, 1], self.dst_h - 1 - new_coords[:, 0]])
        elif self.rotate_angle == 180:
            new_coords = np.column_stack([
                self.dst_w - 1 - new_coords[:, 0],
                self.dst_h - 1 - new_coords[:, 1]
            ])
        elif self.rotate_angle == 270:
            new_coords = np.column_stack([self.dst_w - 1 - new_coords[:, 1], new_coords[:, 0]])

        # Keep only coordinates inside the destination image.
        bounds_valid = (
                (new_coords[:, 0] >= 0) & (new_coords[:, 0] < self.dst_w) &
                (new_coords[:, 1] >= 0) & (new_coords[:, 1] < self.dst_h)
        )

        if not np.any(bounds_valid):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        new_coords = new_coords[bounds_valid]
        pixel_values = uv_color[bounds_valid, 2].astype(np.int32)

        # Shift into the padded buffer before splatting the 2x2 blocks.
        final_coords = new_coords + np.array([self.extend_w_pixel, self.extend_h_pixel])

        # Advanced-indexing fill of the 2x2 neighbourhoods.
        self._fill_2x2_vectorized(final_coords, pixel_values)

        return self.dp_extend[
               self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
               self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
               ]

    def _fill_2x2_vectorized(self, coords, values):
        """向量化填充2x2像素区域"""
        x_coords, y_coords = coords[:, 0], coords[:, 1]

        # 根据旋转角度确定2x2填充模式
        if self.rotate_angle == 0:
            offsets = [(0, 0), (1, 0), (0, 1), (1, 1)]
        elif self.rotate_angle == 90:
            offsets = [(0, 0), (1, 0), (0, -1), (1, -1)]
        elif self.rotate_angle == 180:
            offsets = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
        else:  # 270
            offsets = [(0, 0), (-1, 0), (0, 1), (-1, 1)]

        for dx, dy in offsets:
            final_x = x_coords + dx
            final_y = y_coords + dy

            # 边界检查
            valid_indices = (
                    (final_x >= 0) & (final_x < self.dp_extend.shape[1]) &
                    (final_y >= 0) & (final_y < self.dp_extend.shape[0])
            )

            if np.any(valid_indices):
                self.dp_extend[final_y[valid_indices], final_x[valid_indices]] = values[valid_indices]

    # Method 3: hybrid optimized version (recommended)
    def align_dp_to_rgb_optimized(self, dp_img, t_coeff: dict):
        """Recommended optimized variant: vectorized but kept readable.

        Same contract and output as align_dp_to_rgb; see that method for the
        meaning of the LUT and of `t_coeff`.

        Args:
            dp_img: depth image of shape (src_h, src_w); zero pixels are skipped.
            t_coeff: dict with translation coefficients 't0', 't1', 't2'.

        Returns:
            A (dst_h, dst_w) view into `dp_extend` with the aligned depth.
        """
        self.dp_extend.fill(0)

        # Pre-select valid (non-zero) depth pixels.
        valid_mask = dp_img > 0
        if not np.any(valid_mask):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        # Coordinates of the valid pixels.
        rows, cols = np.where(valid_mask)
        dp_vals = dp_img[rows, cols]
        lut_vals = self.lut[rows, cols]

        # Vectorized projection: uv = dp * lut + t.
        uv_x = dp_vals * lut_vals[:, 0] + t_coeff['t0']
        uv_y = dp_vals * lut_vals[:, 1] + t_coeff['t1']
        uv_z = dp_vals * lut_vals[:, 2] + t_coeff['t2']

        # Drop points whose z is too small.
        z_valid = uv_z >= 1e-6
        if not np.any(z_valid):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        uv_x, uv_y, uv_z = uv_x[z_valid], uv_y[z_valid], uv_z[z_valid]

        # Perspective division (with +0.5 rounding).
        inv_z = 1.0 / uv_z
        new_x = (uv_x * inv_z + 0.5).astype(np.int32)
        new_y = (uv_y * inv_z + 0.5).astype(np.int32)

        # Rotation into the destination frame.
        new_x, new_y = self._apply_rotation_vectorized(new_x, new_y)

        # Bounds check against the destination image.
        bounds_valid = (
                (new_x >= 0) & (new_x < self.dst_w) &
                (new_y >= 0) & (new_y < self.dst_h)
        )

        if not np.any(bounds_valid):
            return self.dp_extend[
                   self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                   self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
                   ]

        new_x, new_y = new_x[bounds_valid], new_y[bounds_valid]
        pixel_values = uv_z[bounds_valid].astype(np.int32)

        # Splat into the padded buffer.
        self._fill_pixels_optimized(new_x, new_y, pixel_values)

        return self.dp_extend[
               self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
               self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
               ]

    def _apply_rotation_vectorized(self, x, y):
        """向量化旋转处理"""
        if self.rotate_angle == 90:
            return y, self.dst_h - 1 - x
        elif self.rotate_angle == 180:
            return self.dst_w - 1 - x, self.dst_h - 1 - y
        elif self.rotate_angle == 270:
            return self.dst_w - 1 - y, x
        else:
            return x, y

    def _fill_pixels_optimized(self, x_coords, y_coords, values):
        """优化的像素填充"""
        final_x = x_coords + self.extend_w_pixel
        final_y = y_coords + self.extend_h_pixel

        # 批量填充2x2区域
        for dx in range(2):
            for dy in range(2):
                if self.rotate_angle == 0:
                    fx, fy = final_x + dx, final_y + dy
                elif self.rotate_angle == 90:
                    fx, fy = final_x + dx, final_y - dy
                elif self.rotate_angle == 180:
                    fx, fy = final_x - dx, final_y - dy
                else:  # 270
                    fx, fy = final_x - dx, final_y + dy

                # 边界检查并填充
                valid = (
                        (fx >= 0) & (fx < self.dp_extend.shape[1]) &
                        (fy >= 0) & (fy < self.dp_extend.shape[0])
                )

                if np.any(valid):
                    self.dp_extend[fy[valid], fx[valid]] = values[valid]

    def align_dp_and_ir_to_rgb(self, ir_img, dp_img, t_coeff: dict):
        """Warp the IR and depth images into the RGB frame via the LUT.

        For every valid depth pixel: scale the LUT direction vector by the
        depth value, translate by t_coeff, project by dividing out Z, apply
        the configured rotation, and splat the IR/depth values as a 2x2
        patch into the extended buffers.

        Args:
            ir_img: IR image (src_h x src_w).
            dp_img: depth image (src_h x src_w); values <= 0 are skipped.
            t_coeff: translation dict with keys 't0', 't1', 't2'.

        Returns:
            (aligned_depth, aligned_ir), each cropped to (dst_h x dst_w);
            both are slices of the internal extended buffers.
        """
        t0, t1, t2 = t_coeff['t0'], t_coeff['t1'], t_coeff['t2']
        # Sign of the extra row/column of the 2x2 splat for each rotation
        # angle; any other angle writes nothing (as in the original chain).
        splat_signs = {0: (1, 1), 90: (1, -1), 180: (-1, -1), 270: (-1, 1)}

        for row in range(self.src_h):
            for col in range(self.src_w):
                dp_val = dp_img[row, col]
                if not dp_val > 0:
                    continue

                ir_val = ir_img[row, col]
                lx, ly, lz = self.lut[row, col]

                # Scale the LUT ray by depth and translate into the RGB frame.
                cam_x = dp_val * lx + t0
                cam_y = dp_val * ly + t1
                cam_z = dp_val * lz + t2
                inv_z = 1.0 / cam_z

                new_x = int(cam_x * inv_z + 0.5)
                new_y = int(cam_y * inv_z + 0.5)

                # Rotate the projected coordinate into the display frame.
                if self.rotate_angle == 90:
                    new_x, new_y = new_y, self.dst_h - 1 - new_x
                elif self.rotate_angle == 180:
                    new_x, new_y = self.dst_w - 1 - new_x, self.dst_h - 1 - new_y
                elif self.rotate_angle == 270:
                    new_x, new_y = self.dst_w - 1 - new_y, new_x

                # Discard points that fall outside the target image.
                if not (0 <= new_x < self.dst_w and 0 <= new_y < self.dst_h):
                    continue

                signs = splat_signs.get(self.rotate_angle)
                if signs is None:
                    continue  # unsupported rotation angle: write nothing

                depth_out = int(cam_z)
                base_x = self.extend_w_pixel + new_x
                base_y = self.extend_h_pixel + new_y
                sx, sy = signs

                # 2x2 splat; the guard band keeps the +/-1 offsets in bounds.
                for dy in (0, sy):
                    for dx in (0, sx):
                        self.ir_extend[base_y + dy, base_x + dx] = ir_val
                        self.dp_extend[base_y + dy, base_x + dx] = depth_out

        # Crop the guard band off both buffers.
        y0, y1 = self.extend_h_pixel, self.extend_buf_h - self.extend_h_pixel
        x0, x1 = self.extend_w_pixel, self.extend_buf_w - self.extend_w_pixel
        return self.dp_extend[y0:y1, x0:x1], self.ir_extend[y0:y1, x0:x1]


class RMSLAlignNew(object):
    """Align a depth image into the RGB camera frame.

    Per valid depth pixel the pipeline is:
        1. back-project (col, row, depth) into the depth camera's 3D frame,
        2. transform by the IR->RGB extrinsics (R_ir2rgb, T_ir2rgb),
        3. project onto the RGB image plane,
        4. apply the optional 90/180/270-degree rotation, bounds-check, and
           write into an extended (guard-banded) buffer.

    ``align_dp_to_rgb`` is the scalar reference implementation;
    ``align_dp_to_rgb_vectorized`` is the NumPy fast path.
    """

    def __init__(self,
                 src_w: int, src_h: int,
                 dst_w: int, dst_h: int,
                 extend_w_pixels: int = 256,
                 extend_h_pixels: int = 256,
                 rotate_angle: int = 0):
        """
        Args:
            src_w, src_h: depth (source) image size.
            dst_w, dst_h: RGB (target) image size.
            extend_w_pixels, extend_h_pixels: guard band added on every side
                of the target buffer.
            rotate_angle: rotation applied after projection (0/90/180/270).
        """
        self.src_w = src_w
        self.src_h = src_h
        self.dst_w = dst_w
        self.dst_h = dst_h
        self.rotate_angle = rotate_angle
        self.extend_w_pixel = extend_w_pixels
        self.extend_h_pixel = extend_h_pixels

        self.extend_buf_w = dst_w + 2 * self.extend_w_pixel
        self.extend_buf_h = dst_h + 2 * self.extend_h_pixel

        # Extended depth buffer; the guard band absorbs near-edge writes.
        self.dp_extend = np.zeros((self.extend_buf_h, self.extend_buf_w), dtype=np.uint16)

        # Calibration parameters, populated by set_calibration_params().
        self.R_ir2rgb = None
        self.T_ir2rgb = None
        self.rgb_intrinsic = {}
        self.depth_intrinsic = {}  # depth camera intrinsics (optional)

        self.logger = init_logger(RMSL_LOG_PATH)

    def set_calibration_params(self, intrinsic_info):
        """Load extrinsics and intrinsics from a parsed calibration dict.

        Args:
            intrinsic_info: dict with 'r_coeff' (r0..r8, row-major 3x3),
                't_coeff' (t0..t2), 'rgb_intrinsic' and, optionally,
                'l_intrinsic' (the depth camera intrinsics).
        """
        r = intrinsic_info['r_coeff']
        self.R_ir2rgb = np.array([[r['r0'], r['r1'], r['r2']],
                                  [r['r3'], r['r4'], r['r5']],
                                  [r['r6'], r['r7'], r['r8']]])

        t = intrinsic_info['t_coeff']
        self.T_ir2rgb = np.array([t['t0'], t['t1'], t['t2']])

        self.rgb_intrinsic = intrinsic_info['rgb_intrinsic']

        if 'l_intrinsic' in intrinsic_info:
            self.depth_intrinsic = intrinsic_info['l_intrinsic']
        else:
            # Alignment will fall back to RGB intrinsics as an approximation.
            self.logger.log_to_file(" [Warn] Miss depth intrinsic info.")

    def _get_intrinsics(self, debug=False):
        """Return (fx_rgb, fy_rgb, cx_rgb, cy_rgb, fx_d, fy_d, cx_d, cy_d).

        Falls back to the RGB intrinsics when no depth intrinsics were set
        (the two are typically close for these modules).
        """
        fx_rgb = self.rgb_intrinsic['fx']
        fy_rgb = self.rgb_intrinsic['fy']
        cx_rgb = self.rgb_intrinsic['cx']
        cy_rgb = self.rgb_intrinsic['cy']

        if self.depth_intrinsic:
            fx_d = self.depth_intrinsic['fx']
            fy_d = self.depth_intrinsic['fy']
            cx_d = self.depth_intrinsic['cx']
            cy_d = self.depth_intrinsic['cy']
        else:
            fx_d, fy_d, cx_d, cy_d = fx_rgb, fy_rgb, cx_rgb, cy_rgb
            if debug:
                print("使用RGB内参作为depth相机内参的近似")

        return fx_rgb, fy_rgb, cx_rgb, cy_rgb, fx_d, fy_d, cx_d, cy_d

    def _rotate_coords(self, new_x, new_y):
        """Apply the configured rotation to projected pixel coordinates.

        Works on both scalars and NumPy arrays.
        """
        if self.rotate_angle == 90:
            return new_y, self.dst_h - 1 - new_x
        if self.rotate_angle == 180:
            return self.dst_w - 1 - new_x, self.dst_h - 1 - new_y
        if self.rotate_angle == 270:
            return self.dst_w - 1 - new_y, new_x
        return new_x, new_y

    def _crop_result(self):
        """Slice the valid (dst_h x dst_w) window out of the extended buffer."""
        return self.dp_extend[self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
                              self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel]

    def align_dp_to_rgb(self, dp_img, depth_scale=1.0, debug=True):
        """Align a depth image to the RGB frame (scalar reference version).

        Args:
            dp_img: depth image (src_h x src_w); values <= 0 are skipped.
            depth_scale: factor converting depth pixel values to mm.
            debug: print a note when falling back to RGB intrinsics.

        Returns:
            Aligned depth image (dst_h x dst_w) holding the ORIGINAL depth
            values (not the rescaled ones). NOTE: this is a view into an
            internal buffer that the next call overwrites; copy it if it
            must persist.
        """
        self.dp_extend.fill(0)
        (fx_rgb, fy_rgb, cx_rgb, cy_rgb,
         fx_depth, fy_depth, cx_depth, cy_depth) = self._get_intrinsics(debug)

        for row in range(self.src_h):
            for col in range(self.src_w):
                dp_val = dp_img[row, col]
                if dp_val <= 0:
                    continue

                # 1. Back-project the depth pixel to a 3D point (mm).
                z_depth = dp_val * depth_scale
                if z_depth <= 0:
                    continue
                x_depth = (col - cx_depth) * z_depth / fx_depth
                y_depth = (row - cy_depth) * z_depth / fy_depth

                # 2. Transform into the RGB camera frame.
                point_rgb = self.R_ir2rgb @ np.array([x_depth, y_depth, z_depth]) + self.T_ir2rgb
                if point_rgb[2] <= 1e-6:  # behind / degenerate: avoid div-by-zero
                    continue

                # 3. Project onto the RGB image plane.
                u_rgb = fx_rgb * point_rgb[0] / point_rgb[2] + cx_rgb
                v_rgb = fy_rgb * point_rgb[1] / point_rgb[2] + cy_rgb

                # BUGFIX: int(u + 0.5) truncates toward zero, so u in
                # (-1.0, -0.5] was wrongly rounded to 0 and written into
                # column/row 0. floor() gives true round-half-up for all signs.
                new_x = int(np.floor(u_rgb + 0.5))
                new_y = int(np.floor(v_rgb + 0.5))

                # 4. Rotate, then reject points outside the target image.
                new_x, new_y = self._rotate_coords(new_x, new_y)
                if not (0 <= new_x < self.dst_w and 0 <= new_y < self.dst_h):
                    continue

                # 5. Write the original depth value into the extended buffer.
                self.dp_extend[self.extend_h_pixel + new_y,
                               self.extend_w_pixel + new_x] = int(dp_val)

        return self._crop_result()

    def align_dp_to_rgb_vectorized(self, dp_img, depth_scale=1.0):
        """Align a depth image to the RGB frame (vectorized fast path).

        Args:
            dp_img: depth image (src_h x src_w) as a NumPy array.
            depth_scale: factor converting depth pixel values to mm.

        Returns:
            A copy of the aligned depth image (dst_h x dst_w).
        """
        self.dp_extend.fill(0)
        (fx_rgb, fy_rgb, cx_rgb, cy_rgb,
         fx_depth, fy_depth, cx_depth, cy_depth) = self._get_intrinsics()

        # Pixel coordinate grid; keep only valid depth samples.
        yy, xx = np.mgrid[0:self.src_h, 0:self.src_w]
        valid_mask = dp_img > 0
        dp_vals = dp_img[valid_mask]
        x_coords = xx[valid_mask]
        y_coords = yy[valid_mask]

        # Back-project all valid pixels at once into a (3, N) matrix.
        z_depth = dp_vals * depth_scale
        x_depth = (x_coords - cx_depth) * z_depth / fx_depth
        y_depth = (y_coords - cy_depth) * z_depth / fy_depth
        points_depth = np.vstack((x_depth, y_depth, z_depth))

        # Transform into the RGB camera frame (T broadcasts over columns).
        points_rgb = self.R_ir2rgb @ points_depth + self.T_ir2rgb.reshape(3, 1)
        x_rgb, y_rgb, z_rgb = points_rgb

        # Drop points with non-positive depth to avoid division by zero.
        valid_projection_mask = z_rgb > 1e-6
        if not np.any(valid_projection_mask):
            return self._crop_result().copy()

        x_rgb = x_rgb[valid_projection_mask]
        y_rgb = y_rgb[valid_projection_mask]
        z_rgb = z_rgb[valid_projection_mask]
        pixel_values = dp_vals[valid_projection_mask]  # keep depths in sync

        # Project onto the RGB image plane.
        u_rgb = fx_rgb * x_rgb / z_rgb + cx_rgb
        v_rgb = fy_rgb * y_rgb / z_rgb + cy_rgb

        # BUGFIX: (u + 0.5).astype(int32) truncates toward zero — see the
        # note in align_dp_to_rgb. floor() rounds correctly for all signs.
        new_x = np.floor(u_rgb + 0.5).astype(np.int32)
        new_y = np.floor(v_rgb + 0.5).astype(np.int32)

        # Rotate, then keep only points inside the target image.
        new_x, new_y = self._rotate_coords(new_x, new_y)
        final_mask = (new_x >= 0) & (new_x < self.dst_w) & (new_y >= 0) & (new_y < self.dst_h)

        # Scatter all surviving depth values in one advanced-indexing write.
        self.dp_extend[self.extend_h_pixel + new_y[final_mask],
                       self.extend_w_pixel + new_x[final_mask]] = pixel_values[final_mask]

        return self._crop_result().copy()


class SaveWorker(QObject):
    """One-shot worker that writes the current frame set (RGB / IR / disp /
    depth / pseudo-color) plus a point cloud into the local ``save`` folder.

    Intended to be moved to a worker QThread: run
    :meth:`download_image_and_pointcloud` and listen to :attr:`finished`
    (always emitted, success or not) and :attr:`log_message` (progress and
    error lines).
    """

    finished = pyqtSignal()        # emitted when the save run completes
    log_message = pyqtSignal(str)  # human-readable progress / error text

    def __init__(self, images_dict: dict):
        """
        Args:
            images_dict: frames keyed by 'rgb', 'disp', 'depth', 'pse_color'
                and, in debug mode, 'ir_l', 'ir_r', 'diff'. 'rgb' and 'disp'
                are required — their shapes select the calibration file name.

        Raises:
            ValueError: when 'rgb' or 'disp' is missing from images_dict.
            FileNotFoundError: when the calibration YAML cannot be pulled
                from the connected device.
        """
        super().__init__()
        self.logger = init_logger(RMSL_LOG_PATH)

        self.utils = UtilsBasic()
        self.save_dir = str(PROJECT_ROOT / 'save')
        self.utils.save_path_check(self.save_dir)

        """-------------------------------------------------------------------------------------"""
        # Get config info
        self.config_file_path = str(PROJECT_ROOT / 'config.json')
        config_info = self.utils.read_json(self.config_file_path)

        self.work_mode = config_info.get('work_mode', 'rgbd')
        self.save_rgb_pointcloud = config_info.get('save_rgb_pointcloud', 0)

        # Depth range used to crop the point cloud (mm).
        self.min_depth_dis = config_info.get('min_distance_mm', 100)
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)

        """-------------------------------------------------------------------------------------"""

        self.rgb_img = images_dict.get('rgb', None)
        self.disp_img = images_dict.get('disp', None)
        self.depth_img = images_dict.get('depth', None)
        self.pse_color_img = images_dict.get('pse_color', None)  # pseudo-color depth

        # Debug-mode frames.
        self.ir_l_img = images_dict.get('ir_l', None)
        self.ir_r_img = images_dict.get('ir_r', None)
        self.diff_img = images_dict.get('diff', None)  # pseudo-color disparity diff

        # BUGFIX: fail fast with a clear message; the shape accesses below
        # otherwise raised an opaque AttributeError on None.
        if self.rgb_img is None or self.disp_img is None:
            raise ValueError("images_dict must contain both 'rgb' and 'disp' images")

        """-------------------------------------------------------------------------------------"""
        # Get intrinsic params
        self.calib_yaml_path = str(PROJECT_ROOT / 'config/calibration.yaml')
        disp_h, disp_w = self.disp_img.shape[:2]
        rgb_h, rgb_w = self.rgb_img.shape[:2]
        self.calib_yaml_name = f"calib_param_ir{disp_w}x{disp_h}_rgb{rgb_w}x{rgb_h}.yaml"
        self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
        if not self.utils.is_file_on_device(self.calib_yaml_evb_path):
            # Fall back to the resolution-only file name used by other firmware.
            self.calib_yaml_name = f"calib_param_{disp_w}x{disp_h}.yaml"
            self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
            self.logger.log_to_file(f' [Info] Change calib file to: {self.calib_yaml_evb_path}')

        if not self.utils.pull_file_from_single_device(self.calib_yaml_evb_path, self.calib_yaml_path):
            raise FileNotFoundError("Check device connect and calib yaml!")

        parser = ParserCamIntrinsic(self.calib_yaml_path)
        self.intrinsic_params = parser.get_all_camera_intrinsic()
        self.fx = self.intrinsic_params['l_intrinsic']['fx']
        self.fy = self.intrinsic_params['l_intrinsic']['fy']
        self.cx = self.intrinsic_params['l_intrinsic']['cx']
        self.cy = self.intrinsic_params['l_intrinsic']['cy']
        self.module_category = self.intrinsic_params['category']

        """-------------------------------------------------------------------------------------"""
        # Depth->RGB aligner, needed only when exporting an RGB point cloud.
        if self.save_rgb_pointcloud == 1 and self.module_category == 'rgbd':
            self.r_coeff = self.intrinsic_params['r_coeff']
            self.t_coeff = self.intrinsic_params['t_coeff']

            self.aligner = RMSLAlign(src_w=self.depth_img.shape[1],
                                     src_h=self.depth_img.shape[0],
                                     dst_w=self.rgb_img.shape[1],
                                     dst_h=self.rgb_img.shape[0],
                                     extend_w_pixels=256,
                                     extend_h_pixels=256,
                                     rotate_angle=0)
            self.aligner.create_lut(self.r_coeff)

    def _write_bgr(self, img, name):
        """Convert an RGB frame to BGR and write it under save_dir."""
        path = os.path.join(self.save_dir, name)
        cv2.imwrite(path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
        return path

    def _save_debug_images(self, timestamp):
        """Save left/right IR and disparity-diff frames (rgbd+2ir mode)."""
        if self.ir_l_img is not None:
            self._write_bgr(self.ir_l_img, f'ir_l_{timestamp}.png')
        if self.ir_r_img is not None:
            self._write_bgr(self.ir_r_img, f'ir_r_{timestamp}.png')
        self.log_message.emit(f' [Info] Downloaded IR image to dir: {self.save_dir}.')

        if self.diff_img is not None:
            self._write_bgr(self.diff_img, f'disp_pse_diff_{timestamp}.png')
            self.log_message.emit(f' [Info] Downloaded PseDispDiff image to dir: {self.save_dir}.')

    def _save_point_cloud(self, timestamp):
        """Generate and save the point cloud (colored or plain, per config)."""
        pc_path = os.path.join(self.save_dir, f'pointcloud_{timestamp}.ply')
        if self.save_rgb_pointcloud == 1:
            if self.module_category == 'rgbd':
                # Re-map depth into the RGB frame so colors line up.
                self.depth_img = self.aligner.align_dp_to_rgb_optimized(self.depth_img, self.t_coeff)
                intrinsic = self.intrinsic_params['rgb_intrinsic']
            else:
                intrinsic = self.intrinsic_params['l_intrinsic']
            pc_save = self.utils.generate_rgb_pointcloud(self.rgb_img,
                                                         self.depth_img,
                                                         intrinsic,
                                                         depth_ratio=1.0,
                                                         dis_near=self.min_depth_dis,
                                                         dis_far=self.max_depth_dis)
            self.utils.save_rgb_pointcloud_fast(pc_save, pc_path)
        else:
            _, pc_save, _ = self.utils.generate_pointcloud_by_depth(self.depth_img,
                                                                    self.fx,
                                                                    self.fy,
                                                                    self.cx,
                                                                    self.cy,
                                                                    self.depth_img.shape[0],
                                                                    self.depth_img.shape[1],
                                                                    self.min_depth_dis,
                                                                    self.max_depth_dis,
                                                                    en_valid_roi=False,
                                                                    en_cen_roi=False)
            self.utils.save_pointcloud_fast(pc_save, pc_path)
        self.log_message.emit(f' [Info] Downloaded pointcloud to dir: {self.save_dir}')

    @pyqtSlot()
    def download_image_and_pointcloud(self):
        """Save every available frame plus a point cloud.

        Always emits ``finished``; failures are reported via ``log_message``
        instead of raising.
        """
        try:
            timestamp = UtilsBasic.get_formatted_timestamp()

            if self.rgb_img is not None:
                self._write_bgr(self.rgb_img, f'rgb_{timestamp}.png')
                self.log_message.emit(f' [Info] Downloaded RGB image to dir: {self.save_dir}')

            if self.work_mode == 'rgbd+2ir':
                self._save_debug_images(timestamp)

            if self.disp_img is not None:
                cv2.imwrite(os.path.join(self.save_dir, f'disp_{timestamp}.png'), self.disp_img)
                self.log_message.emit(f' [Info] Downloaded disp image to dir: {self.save_dir}')

            if self.depth_img is not None:
                cv2.imwrite(os.path.join(self.save_dir, f'depth_{timestamp}.png'), self.depth_img)
                self.log_message.emit(f' [Info] Downloaded depth image to dir: {self.save_dir}')

                # BUGFIX: the pseudo-color frame was written unconditionally;
                # cv2.imwrite(None) raised and aborted the point-cloud save.
                if self.pse_color_img is not None:
                    cv2.imwrite(os.path.join(self.save_dir, f'depth_pse_color_{timestamp}.png'),
                                self.pse_color_img)
                    self.log_message.emit(f' [Info] Downloaded pse color image to dir: {self.save_dir}')

                self._save_point_cloud(timestamp)
        except Exception as e:
            self.log_message.emit(f' [Error] Failed save image: {e}')
        finally:
            # Always release the caller, even on failure.
            self.finished.emit()


class FrameProcessor(QThread):
    """Background worker that parses raw UVC frame buffers into RGB / IR /
    disparity / depth images and hands the results to the UI thread via Qt
    signals.

    Two buffer layouts are supported, selected by ``visual_mode`` in
    config.json:
      * ``"normal"`` - every USB transfer starts with a 28-byte
        ``uvc_head_v1`` header telling whether the payload is an RGB frame
        (type 1) or a disparity frame (type 2);
      * legacy - one big buffer vertically stacking NV12 image(s) and a raw
        uint16 disparity block, split according to ``work_mode``.
    """

    # Both signals carry a plain dict; see run() for the key layout.
    processing_rgb_finished = pyqtSignal(object)
    processing_dp_finished = pyqtSignal(object)

    def __init__(self):
        super().__init__()
        self.processing_queue = queue.Queue()  # raw buffers waiting to be parsed
        self.is_running = True
        self.logger = init_logger(RMSL_LOG_PATH)
        self.utils = UtilsBasic()
        # NOTE(review): is_parsed is never set to True anywhere in this class,
        # so the intrinsics re-check in run() executes for every disparity
        # frame (cheap when sizes already match) — confirm this is intended.
        self.is_parsed = False

        """-------------------------------------------------------------------------------------"""
        # Get config info
        self.config_file_path = str(PROJECT_ROOT / 'config.json')
        config_info = self.utils.read_json(self.config_file_path)
        self.view_mode = config_info['view_mode']
        self.work_mode = config_info.get('work_mode', 'rgbd')
        self.visual_mode = config_info['visual_mode']
        if self.visual_mode.lower() == "normal":
            # New headered mode: fixed transfer size, payload type in header.
            self.uvc_buf_w = 1080
            self.uvc_buf_h = 960  # 1280 * 1.5 // 2
            self.rgb_w = 1280
            self.rgb_h = 1080
            self.disp_w = 640
            self.disp_h = 352
        else:
            self.rgb_w = 1280
            self.rgb_h = 1080
            self.disp_w = 640
            self.disp_h = 416

            work_mode = self.work_mode.lower()
            if work_mode == '2rgbd':
                """
                ----------------------------------------
                | RGB  (NV12)  | 1280 × 1.5 = 1920 row |
                | DISP (RAW)   |               320 row |
                ----------------------------------------
                whole buffer: 1080 x 2240 (stacked vertically)
                """
                self.uvc_buf_w = 1080
                self.uvc_buf_h = 2240
            elif work_mode == 'rgbd+2ir':
                """
                ----------------------------------------
                | IR-L (NV12)  | 1280 × 1.5 = 1920 row |
                | IR-R (NV12)  | 1280 × 1.5 = 1920 row |
                | RGB  (NV12)  | 1280 × 1.5 = 1920 row |
                | DISP (RAW)   |               320 row |
                ----------------------------------------
                whole buffer: 1080 x 3200 (stacked vertically)
                """
                self.uvc_buf_w = 1080
                self.uvc_buf_h = 3200
            elif work_mode == 'rgbd':
                # NOTE(review): unlike '2rgbd', plain 'rgbd' uses a 1280x1080
                # buffer here rather than the stacked 1080x2240 layout —
                # confirm against the device firmware.
                self.uvc_buf_w = 1280
                self.uvc_buf_h = 1080
            else:
                # Fall back to the 'rgbd' buffer size for unknown modes.
                # (Message fixed: the supported key is 'rgbd+2ir', not 'rgbd-2ir'.)
                self.logger.log_to_file(f" [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                                        f"Convert to RGBD mode, ['rgbd', 'rgbd+2ir', '2rgbd'].")
                self.uvc_buf_w = 1280
                self.uvc_buf_h = 1080
        # Depth range (mm) used for depth clipping and colorization.
        self.min_depth_dis = config_info.get('min_distance_mm', 100)  # mm
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)  # mm
        self.colorizer_mode = config_info.get('colorize_mode', 'jet_red2blue')

        self.colorizer = ColorizerNew(mode=self.colorizer_mode)

        # Per-frame caches, refilled by run()/process_*_frame().
        self.rgb_img = None  # rgb
        self.ir_l_img = None  # speckle-left
        self.ir_r_img = None  # speckle-right
        self.last_disp = None  # previous disparity frame (for diff view)
        self.diff_disp = None  # colorized disparity diff
        self.disp_img = None  # disp
        self.depth_img = None  # depth
        self.pse_color_img = None  # pseudo color of depth

        # Pull the calibration yaml from the device: try the ir+rgb naming
        # first, then fall back to the legacy single-resolution naming.
        self.calib_yaml_path = str(PROJECT_ROOT / 'config/calibration.yaml')
        self.calib_yaml_name = f"calib_param_ir{self.disp_w}x{self.disp_h}_rgb{self.rgb_w}x{self.rgb_h}.yaml"
        self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
        if not self.utils.is_file_on_device(self.calib_yaml_evb_path):
            self.calib_yaml_name = f"calib_param_{self.disp_w}x{self.disp_h}.yaml"
            self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
            self.logger.log_to_file(f' [Info] Change calib file to: {self.calib_yaml_evb_path}')

        if not self.utils.pull_file_from_single_device(self.calib_yaml_evb_path, self.calib_yaml_path):
            raise FileNotFoundError("Check device connect and calib yaml!")

        parser = ParserCamIntrinsic(self.calib_yaml_path)
        self.intrinsic_params = parser.get_all_camera_intrinsic()
        # Q-matrix terms consumed by the disp -> depth conversion.
        self.q23 = self.intrinsic_params['q23']
        self.q32 = self.intrinsic_params['q32']
        self.q33 = self.intrinsic_params['q33']

        if self.intrinsic_params['category'] == 'rgbd':
            # Initial RGB undistortion (alpha=0: crop to all-valid pixels).
            new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(self.intrinsic_params['rgb_intrinsic']['K'],
                                                                   self.intrinsic_params['rgb_distortion']['D'],
                                                                   (self.intrinsic_params['rgb_width'], self.intrinsic_params['rgb_height']),
                                                                   alpha=0,
                                                                   newImgSize=(self.intrinsic_params['rgb_width'], self.intrinsic_params['rgb_height']))

            # Compute the undistort remap tables once; reused per frame.
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsic_params['rgb_intrinsic']['K'],
                                                               self.intrinsic_params['rgb_distortion']['D'],
                                                               None,
                                                               new_camera_matrix,
                                                               (self.intrinsic_params['rgb_width'],
                                                                self.intrinsic_params['rgb_height']),
                                                               cv2.CV_32FC1)
        else:
            # 2RGB: rectify the left camera with the stereo R1/P1 instead.
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsic_params['l_intrinsic']['K_l'],
                                                               self.intrinsic_params['l_distortion']['D_l'],
                                                               self.intrinsic_params['R1'],
                                                               self.intrinsic_params['P1'],
                                                               (self.intrinsic_params['l_width'],
                                                                self.intrinsic_params['l_height']),
                                                               cv2.CV_32FC1)

        """-------------------------------------------------------------------------------------"""

    def add_frame(self, frame_info):
        """
            Queue a raw frame buffer for parsing. If the consumer is falling
            behind (queue depth > 2), drop the backlog so only fresh frames
            are processed.
        :param frame_info: raw UVC buffer (bytes-like)
        :return:
        """
        if self.processing_queue.qsize() > 2:
            while not self.processing_queue.empty():
                try:
                    self.processing_queue.get_nowait()
                except queue.Empty:
                    break
        self.processing_queue.put(frame_info)

    def run(self):
        """Main loop: pop a raw buffer, split/convert it, emit result dicts
        (``processing_rgb_finished`` and/or ``processing_dp_finished``)."""
        while self.is_running:
            try:
                # Wait at most 1 s so stop() is noticed even when idle.
                frame_data = self.processing_queue.get(timeout=1.0)
                expected_size = self.uvc_buf_w * self.uvc_buf_h * 2
                frame_array = np.frombuffer(frame_data, dtype=np.uint8)
                if len(frame_array) != expected_size:
                    self.logger.log_to_file(
                        f' [Warn] [FrameProcessor] Frame size mismatch: {len(frame_array) - expected_size}')
                    # Bug fix: this used `return`, which terminated the whole
                    # worker thread on the first truncated frame; skip just
                    # this frame instead.
                    continue

                # Reset per-frame caches.
                self.rgb_img = None  # rgb
                self.ir_l_img = None  # speckle-left
                self.ir_r_img = None  # speckle-right
                self.disp_img = None  # disp
                self.depth_img = None  # depth
                self.pse_color_img = None  # pse color of depth

                if self.visual_mode.lower() == "normal":
                    # UVC HEAD MODE: 28-byte header identifies the payload.
                    header = self.parse_uvc_head(frame_array)
                    head_size = 28
                    if header['magic'] != 0xA55A:
                        self.logger.log_to_file(f" [Warn] UVC Head parser error!")
                        continue
                    timestamp = int(header['timestamp'] / 1000)  # us to ms

                    if header['type'] == 1:  # rgb frame
                        rgb_frame_data = frame_array[0: expected_size]
                        rgb_frame_data = rgb_frame_data.reshape(self.rgb_w * 3 // 2, self.rgb_h)
                        rgb_frames_rgb = cv2.cvtColor(rgb_frame_data, cv2.COLOR_YUV2RGB_NV12)

                        # Overwrite the header bytes baked into the first row.
                        rgb_frames_rgb[0, : head_size] = rgb_frames_rgb[1, : head_size]
                        self.rgb_img = cv2.rotate(rgb_frames_rgb, cv2.ROTATE_90_CLOCKWISE)
                        self.ir_l_img, self.ir_r_img, self.diff_disp = None, None, None
                        if self.intrinsic_params['category'] == '2rgb':
                            rgb_w = self.intrinsic_params['l_width']
                            rgb_h = self.intrinsic_params['l_height']
                        else:
                            rgb_w = self.intrinsic_params['rgb_width']
                            rgb_h = self.intrinsic_params['rgb_height']
                        # NOTE(review): `and` means no rescale happens when only
                        # one dimension differs from calibration — confirm intended.
                        if rgb_w != self.rgb_w and rgb_h != self.rgb_h:
                            # Row/column subsampling step toward the calibrated size.
                            step = self.rgb_w // rgb_w
                            scaled_image = self.rgb_img[::step, ::step]
                            crop_total = int(self.rgb_h / step - rgb_h)  # 188
                            crop_top = crop_total // 2  # 94
                            crop_bottom = crop_total - crop_top  # 94

                            # Crop symmetrically to the calibrated height.
                            self.rgb_img = scaled_image[crop_top: int(self.rgb_h / step) - crop_bottom, :]

                        self.rgb_img = cv2.remap(self.rgb_img, self.mapx, self.mapy, cv2.INTER_LINEAR)
                        result_rgb = {
                            'rgb': self.rgb_img,
                            'ir_l': self.ir_l_img,
                            'ir_r': self.ir_r_img,
                            'diff': self.diff_disp,
                            'timestamp': timestamp
                        }
                        self.processing_rgb_finished.emit(result_rgb)

                    elif header['type'] == 2:  # disparity frame
                        depth_w = header['width']
                        depth_h = header['height']
                        dp_frame_data = frame_array[0:depth_h * depth_w * 2]
                        # .copy(): frombuffer views are read-only, and the
                        # header words in row 0 are overwritten below.
                        dp_frame_16bit = np.frombuffer(dp_frame_data, dtype=np.uint16).copy()
                        dp_frame_16bit = dp_frame_16bit.reshape((depth_h, depth_w))

                        # Overwrite the header words baked into the first row.
                        dp_frame_16bit[0, : (head_size // 2)] = dp_frame_16bit[1, : (head_size // 2)]
                        self.disp_img = dp_frame_16bit

                        """-------------------------------------------------------------------------------------"""
                        if not self.is_parsed:
                            # Re-pull the calibration when the live disparity
                            # size differs from the configured one.
                            disp_h, disp_w = self.disp_img.shape[:2]
                            if disp_h != self.disp_h or disp_w != self.disp_w:
                                self.calib_yaml_path = str(PROJECT_ROOT / 'config/calibration.yaml')
                                self.calib_yaml_name = f"calib_param_ir{disp_w}x{disp_h}_rgb{self.rgb_w}x{self.rgb_h}.yaml"
                                self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
                                if not self.utils.is_file_on_device(self.calib_yaml_evb_path):
                                    self.calib_yaml_name = f"calib_param_{disp_w}x{disp_h}.yaml"
                                    self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
                                    self.logger.log_to_file(f' [Info] Change calib file to: {self.calib_yaml_evb_path}')

                                if not self.utils.pull_file_from_single_device(self.calib_yaml_evb_path, self.calib_yaml_path):
                                    raise FileNotFoundError("Check device connect and calib yaml!")

                                parser = ParserCamIntrinsic(self.calib_yaml_path)
                                self.intrinsic_params = parser.get_all_camera_intrinsic()
                                self.q23 = self.intrinsic_params['q23']
                                self.q32 = self.intrinsic_params['q32']
                                self.q33 = self.intrinsic_params['q33']

                        """-------------------------------------------------------------------------------------"""

                        if self.view_mode == 'depth' and self.disp_img is not None:
                            # convert disp to depth
                            self.depth_img = self.utils.disp_to_depth_vectorized(self.disp_img, self.q23, self.q32,
                                                                                 self.q33,
                                                                                 subpixel_value=64,
                                                                                 zoom_ratio=1.0)
                            self.pse_color_img = self.colorizer.colorize(self.depth_img, depth_range=(
                                self.min_depth_dis, self.max_depth_dis))
                        elif self.disp_img is not None:
                            # clip range use default range.
                            self.pse_color_img = self.colorizer.adaptive_colorize(self.disp_img)
                        else:
                            self.pse_color_img = None

                        result = {
                            'disp': self.disp_img,
                            'depth': self.depth_img,
                            'pse_color': self.pse_color_img,
                            'timestamp': timestamp
                        }

                        self.processing_dp_finished.emit(result)
                    else:
                        self.logger.log_to_file(f" [Error] Header mode just support [1: rgb, 2: disp] ")
                else:
                    # Legacy stacked-buffer mode: fixed offsets, no header.
                    rgb_img_size = self.rgb_w * self.rgb_h * 3 // 2  # nv12
                    depth_img_size = self.disp_w * self.disp_h * 2  # uint16

                    # Get Result
                    result_rgb = self.process_rgb_frame(frame_array, rgb_img_size, depth_img_size)
                    self.processing_rgb_finished.emit(result_rgb)

                    result_depth = self.process_depth_frame(frame_array, rgb_img_size, depth_img_size)
                    self.processing_dp_finished.emit(result_depth)
            except queue.Empty:
                continue
            except Exception as e:
                self.logger.log_to_file(f" [Info] Failed to process frame buffer: {e}")

    def parse_uvc_head(self, frame_data: Union[bytes, np.ndarray]) -> Dict[str, Any]:
        """Parse the 28-byte little-endian ``uvc_head_v1`` struct at the start
        of a frame buffer.

        Layout (``<HHHHHH I Q I``): magic, version, size, type, width, height
        (uint16 each), sequence (uint32), timestamp in us (uint64),
        reserve (uint32).

        :param frame_data: full frame buffer; at least 28 bytes are required
            (the original docstring said 48, which did not match the struct).
        :return: dict with keys 'magic', 'version', 'size', 'type', 'width',
            'height', 'sequence', 'timestamp', 'reserve'.
        :raises ValueError: if fewer than 28 bytes are available.
        """
        if isinstance(frame_data, np.ndarray):
            buf = frame_data[:28].tobytes()
        else:
            buf = bytes(frame_data)[:28]

        if len(buf) < 28:
            self.logger.log_to_file(f" [Error] frame_data size not match 28 Bytes，Can not parser struct: uvc_head_v1")
            raise ValueError("frame_data 不足 28 字节，无法解析 uvc_head_v1_t")

        fields = struct.unpack('<HHHHHH I Q I', buf)
        keys = ['magic', 'version', 'size', 'type',
                'width', 'height', 'sequence', 'timestamp', 'reserve']
        return dict(zip(keys, fields))

    def process_rgb_frame(self, frame_array, rgb_img_size, depth_img_size):
        """Split a legacy stacked buffer into RGB (and optionally IR) images.

        :param frame_array: uint8 view over the whole UVC buffer.
        :param rgb_img_size: byte size of one NV12 image plane.
        :param depth_img_size: byte size of the raw uint16 disparity block.
        :return: dict with keys 'rgb', 'ir_l', 'ir_r', 'diff', 'timestamp'.
        """
        mode = self.work_mode.lower()
        if mode == 'rgbd' or mode == '2rgbd':
            # Single NV12 image at the head of the buffer.
            rgb_frame_data = frame_array[0: rgb_img_size]
            rgb_frame_data = rgb_frame_data.reshape(self.rgb_w * 3 // 2, self.rgb_h)
            rgb_frames_rgb = cv2.cvtColor(rgb_frame_data, cv2.COLOR_YUV2RGB_NV12)
            self.rgb_img = cv2.rotate(rgb_frames_rgb, cv2.ROTATE_90_CLOCKWISE)
            self.rgb_img = cv2.remap(self.rgb_img, self.mapx, self.mapy, cv2.INTER_LINEAR)
            self.ir_l_img = None
            self.ir_r_img = None
            self.diff_disp = None
        elif mode == 'rgbd+2ir':
            total_frames = len(frame_array) // rgb_img_size  # actual image count
            frames = []
            for i in range(total_frames):
                start = i * rgb_img_size
                end = (i + 1) * rgb_img_size
                frame_data = frame_array[start:end]
                frames.append(frame_data.reshape((self.rgb_w * 3 // 2, self.rgb_h)))
            frames_rgb = [cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_NV12) for frame in frames]

            # Frame-to-frame disparity diff for flicker inspection.
            dp_offset = total_frames * rgb_img_size
            disp_frame_data = frame_array[dp_offset:(dp_offset + depth_img_size)]
            dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
            disp_frame = dp_frame_16bit.reshape((self.disp_h, self.disp_w))

            if self.last_disp is not None:
                # Mask: True only where both frames have non-zero disparity.
                valid_mask = (disp_frame != 0) & (self.last_disp != 0)
                diff = disp_frame - self.last_disp
                diff[~valid_mask] = 0
                self.diff_disp = self.colorizer.colorize(diff, depth_range=(1, 256), normalize=True)

            self.last_disp = disp_frame

            # u8; buffer order is IR-L, IR-R, RGB.
            self.ir_l_img = cv2.rotate(frames_rgb[0], cv2.ROTATE_90_CLOCKWISE)
            self.ir_r_img = cv2.rotate(frames_rgb[1], cv2.ROTATE_90_CLOCKWISE)
            self.rgb_img = cv2.rotate(frames_rgb[2], cv2.ROTATE_90_CLOCKWISE)
        else:
            self.logger.log_to_file(f" [Error] [FrameProcessor] Mode {self.work_mode} is not supported.")
            self.rgb_img = None
            self.ir_l_img = None
            self.ir_r_img = None
            self.diff_disp = None

        result_rgb = {
            'rgb': self.rgb_img,
            'ir_l': self.ir_l_img,
            'ir_r': self.ir_r_img,
            'diff': self.diff_disp,
            'timestamp': 0
        }

        return result_rgb

    def process_depth_frame(self, frame_array, rgb_img_size, depth_img_size):
        """Extract the disparity block from a legacy stacked buffer and derive
        the depth / pseudo-color images according to ``view_mode``.

        :param frame_array: uint8 view over the whole UVC buffer.
        :param rgb_img_size: byte size of one NV12 image plane.
        :param depth_img_size: byte size of the raw uint16 disparity block.
        :return: dict with keys 'disp', 'depth', 'pse_color', 'timestamp'.
        """
        mode = self.work_mode.lower()
        if mode == 'rgbd' or mode == '2rgbd':
            # Disparity block sits right behind the single NV12 image.
            disp_frame_data = frame_array[rgb_img_size: (rgb_img_size + depth_img_size)]
            dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
            self.disp_img = dp_frame_16bit.reshape((self.disp_h, self.disp_w))
        elif mode == 'rgbd+2ir':
            # Disparity block sits behind all NV12 images.
            total_frames = len(frame_array) // rgb_img_size
            dp_offset = total_frames * rgb_img_size
            disp_frame_data = frame_array[dp_offset:(dp_offset + depth_img_size)]

            dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
            self.disp_img = dp_frame_16bit.reshape((self.disp_h, self.disp_w))
        else:
            # Bug fix: this branch referenced an undefined local `work_mode`
            # (NameError when hit); also use log_to_file for consistency with
            # the rest of this class.
            self.logger.log_to_file(f' [Warn] [FrameProcessor] Unknown work_mode: {self.work_mode}, Failed loading frame.')
            self.disp_img = None

        if self.view_mode == 'depth' and self.disp_img is not None:
            # Convert disparity to depth (Q-matrix terms, 1/64-px subpixel).
            self.depth_img = self.utils.disp_to_depth_vectorized(self.disp_img, self.q23, self.q32, self.q33,
                                                                 subpixel_value=64,
                                                                 zoom_ratio=1.0)
            self.pse_color_img = self.colorizer.colorize(self.depth_img,
                                                         depth_range=(self.min_depth_dis, self.max_depth_dis))
        elif self.disp_img is not None:
            # Raw disparity view: auto-scaled colorization.
            self.pse_color_img = self.colorizer.adaptive_colorize(self.disp_img)
        else:
            self.pse_color_img = None

        result = {
            'disp': self.disp_img,
            'depth': self.depth_img,
            'pse_color': self.pse_color_img,
            'timestamp': 0
        }

        return result

    def stop(self):
        """Ask run() to exit, stop the Qt event loop, and join the thread."""
        self.is_running = False
        self.quit()
        self.wait()


class RMSLViewer(QWidget, Ui_RMSLViewer):
    def __init__(self):
        """Build the viewer window: load config.json, set up widgets/scenes,
        start the FrameProcessor worker thread, and arm the UI-refresh timer."""
        super(RMSLViewer, self).__init__()
        self.logger = init_logger(RMSL_LOG_PATH)
        self.config_file_path = str(PROJECT_ROOT / 'config.json')

        self.utils = UtilsBasic()
        self.uvc_fps = 30  # default

        """-------------------------------------------------------------------------------------"""
        # icons change
        self.icon_open_inactive = QIcon(":/image/image/stop.png")
        self.icon_open_active = QIcon(":/image/image/start.png")
        self.icon_lock_inactive = QIcon(":/image/image/unlock.png")
        self.icon_lock_active = QIcon(":/image/image/lock.png")
        self.icon_3d_inactive = QIcon(":/image/image/2d.png")
        self.icon_3d_active = QIcon(":/image/image/3d.png")

        # status (toggled by the corresponding buttons)
        self.is_started = False
        self.is_locked = False
        self.is_3d = False

        # global status
        self.is_running = False  # Ensure Capture Thread running

        """-------------------------------------------------------------------------------------"""
        # cache and save (latest frames kept for download/save actions)
        self.rgb_img = None  # rgb
        self.ir_l_img = None  # speckle-left
        self.ir_r_img = None  # speckle-right
        self.diff_img = None  # disp diff
        self.disp_img = None  # disp
        self.depth_img = None  # depth
        self.pse_color_img = None  # pse color of depth
        self.point_cloud = None  # pointcloud
        self.timestamp_rgb = 0
        self.timestamp_disp = 0
        self.matcher = SensorDataMatcher()

        """-------------------------------------------------------------------------------------"""
        # beautiful message box
        self.message_box_style = MessageBox_Style
        self.setup_message_box_style()

        """-------------------------------------------------------------------------------------"""
        # ui initial
        # scene and image item, for preview.
        self.scene_rgb = QGraphicsScene()
        self.scene_depth = QGraphicsScene()
        self.image_item_rgb = QGraphicsPixmapItem()
        self.image_item_depth = QGraphicsPixmapItem()

        self.setupUi(self)
        self.button_init()
        self.linedit_init()
        self.textbrowser_init()
        self.view_init()

        """-------------------------------------------------------------------------------------"""
        # bind logger
        self.logger.log_signal.connect(self.log_update)

        """-------------------------------------------------------------------------------------"""
        # Get config info
        config_info = self.utils.read_json(self.config_file_path)
        self.work_mode = config_info.get('work_mode', 'rgbd')
        self.visual_mode = config_info.get('visual_mode', 'normal')
        if self.visual_mode.lower() == "normal":
            # new headered mode
            self.uvc_buf_w = 1080
            self.uvc_buf_h = 960  # 1280 * 1.5 // 2
        else:
            if self.work_mode.lower() == '2rgbd':
                """
                ----------------------------------------
                | RGB  (NV12)  | 1280 × 1.5 = 1920 row |
                | DISP (RAW)   |               320 row |
                ----------------------------------------
                整张 buffer         1080 x 2240（垂直拼接）
                """
                self.uvc_buf_w = 1080
                self.uvc_buf_h = 2240
            elif self.work_mode.lower() == 'rgbd+2ir':
                """
                ----------------------------------------
                | IR-L (NV12)  | 1280 × 1.5 = 1920 row |
                | IR-R (NV12)  | 1280 × 1.5 = 1920 row |
                | RGB  (NV12)  | 1280 × 1.5 = 1920 row |
                | DISP (RAW)   |               320 row |
                ----------------------------------------
                整张 buffer         1080 x 3200（垂直拼接）
                """
                self.uvc_buf_w = 1080
                self.uvc_buf_h = 3200
            elif self.work_mode.lower() == 'rgbd':
                """
                ----------------------------------------
                | RGB  (NV12)  | 1280 × 1.5 = 1920 row |
                | DISP (RAW)   |               320 row |
                ----------------------------------------
                整张 buffer         1080 x 2240（垂直拼接）
                """
                # NOTE(review): the diagram above says 1080x2240, but the code
                # uses a 1280x1080 buffer for 'rgbd' — confirm which is right.
                self.uvc_buf_w = 1280
                self.uvc_buf_h = 1080
            else:
                # Unknown work_mode: fall back to the 'rgbd' buffer size.
                self.logger.log_to_file(f" [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                                        f"Convert to RGBD mode, ['rgbd', 'rgbd-2ir', '2rgbd'].")
                self.uvc_buf_w = 1280
                self.uvc_buf_h = 1080
        # uvc
        self.uvc_name = "UVC Camera"

        # self.calib_yaml_evb_path = '/oem/usr/share/calib_param_640x416.yaml'
        # self.calib_yaml_path = str(PROJECT_ROOT / 'config/calibration.yaml')
        # if not self.utils.pull_file_from_single_device(self.calib_yaml_evb_path, self.calib_yaml_path):
        #     raise FileNotFoundError("Check device connect and calib yaml!")

        """-------------------------------------------------------------------------------------"""
        self.reader = None
        # NOTE(review): 'view_mode' and 'fps' are accessed without .get() and
        # will raise KeyError if missing from config.json — confirm they are
        # always present.
        self.view_mode = config_info['view_mode']

        # Worker thread: raw UVC buffers in, parsed frame dicts out via signals.
        self.image_processor = FrameProcessor()
        self.image_processor.processing_rgb_finished.connect(self.on_processing_rgb_finished)
        self.image_processor.processing_dp_finished.connect(self.on_processing_dp_finished)
        self.image_processor.start()

        self.uvc_fps = config_info['fps']
        self.val_fps = int(1000 / config_info['fps'])

        # Ensure io, force val_fps < 10ms, Test 5ms
        # (i.e. the polling interval is clamped to at most 5 ms so the UVC
        # reader is drained faster than frames arrive)
        if self.val_fps > 5:
            self.val_fps = 5
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.process_frame_fast)
        self.timer.start(self.val_fps)

        self.show()

    def setup_message_box_style(self):
        """Append the shared QMessageBox stylesheet to the application-wide one."""
        app = QApplication.instance()
        combined = app.styleSheet() + self.message_box_style
        app.setStyleSheet(combined)

    def log_update(self, msg):
        """Append a log line to the terminal view and keep it scrolled to the bottom."""
        log_widget = self.terminal_log
        log_widget.append(msg)
        bar = log_widget.verticalScrollBar()
        bar.setValue(bar.maximum())

    def button_init(self):
        """Enable the push buttons, set their initial icons, and connect slots."""
        wiring = (
            (self.pushButton_select_config_file, None, self.select_config_file),
            (self.pushButton_start, self.icon_open_inactive, self.start_and_stop_process),
            (self.pushButton_view_mode, self.icon_3d_inactive, self.view_mode_change),
            (self.pushButton_lock, self.icon_lock_inactive, self.lock_and_unlock_view),
            (self.pushButton_download, None, self.download_image_and_pointcloud),
        )
        for button, icon, slot in wiring:
            button.setEnabled(True)
            if icon is not None:
                button.setIcon(icon)
            button.clicked.connect(slot)

    def linedit_init(self):
        """Show the current config path and re-validate it when Enter is pressed."""
        path_edit = self.lineedit_config_file_path
        path_edit.setText(str(self.config_file_path))
        path_edit.returnPressed.connect(self.update_config_file_path)

    def textbrowser_init(self):
        """Configure the log browser: read-only, selectable text, clickable links."""
        log_view = self.terminal_log
        log_view.setReadOnly(True)
        selection_flags = Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard
        log_view.setTextInteractionFlags(selection_flags)
        log_view.setOpenExternalLinks(True)
        log_view.setPlaceholderText("RMSLViewer Log terminal, waiting...")

    def view_init(self):
        """Apply identical render/update settings to both preview views, then
        center each view on its pixmap item."""
        view_item_pairs = (
            (self.view_rgb, self.image_item_rgb),
            (self.view_depth, self.image_item_depth),
        )
        for view, _item in view_item_pairs:
            view.setRenderHint(QPainter.SmoothPixmapTransform)
            view.setRenderHint(QPainter.Antialiasing)
            view.setOptimizationFlags(QGraphicsView.DontAdjustForAntialiasing)
            view.setOptimizationFlags(QGraphicsView.DontSavePainterState)
            view.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)

        # center visual
        for view, item in view_item_pairs:
            view.centerOn(item)
            view.setAlignment(Qt.AlignCenter)

    def update_config_file_path(self):
        """Accept the edited config path only if the file exists on disk."""
        candidate = self.lineedit_config_file_path.text()
        if not os.path.exists(candidate):
            self.logger.log(f" [Warn] Config file(*.json): {candidate} not exist")
            QMessageBox.warning(self, 'Warning', f'Config file: {candidate} does not exist!')
            return
        self.config_file_path = candidate

    def select_config_file(self):
        """Open a file dialog to pick a config.json; on cancel or a missing
        file, warn and restore the default path in the path line-edit."""
        config_json_path, _ = QFileDialog.getOpenFileName(self,
                                                          "Select config file(config.json)",
                                                          ".",
                                                          "Config files(*.json);;all files (*.*)")
        if not config_json_path or not os.path.exists(config_json_path):
            self.logger.log(f" [Error] File path: {config_json_path} not exists")
            QMessageBox.warning(self,
                                "File Not Found",
                                f"File path: {config_json_path} not exists")
            # Bug fix: this widget is named lineedit_config_file_path (see
            # linedit_init); `lineedit_iq_file_path` does not exist on this
            # class and raised AttributeError on the fallback path.
            self.lineedit_config_file_path.setText(str(PROJECT_ROOT / 'config.json'))
        else:
            self.config_file_path = config_json_path
            # Message fix: this selects the config file, not an "iq file"
            # (copy-paste residue from another tool).
            self.logger.log(f' [Info] Success select config file path: {config_json_path}')

    def start_and_stop_process(self):
        """Toggle the UVC acquisition stream and update the start-button icon."""
        if self.is_started:
            # Currently running -> stop the stream.
            self.pushButton_start.setIcon(self.icon_open_inactive)
            self.stop_process_uvc_buffer()
            self.is_started = False
            self.logger.log(f' [Info] Stopped uvc thread, waiting...')
        else:
            # Currently stopped -> start the stream.
            self.pushButton_start.setIcon(self.icon_open_active)
            self.start_process_uvc_buffer()
            self.is_started = True
            self.logger.log(f' [Info] Started uvc thread...')

    def start_process_uvc_buffer(self):
        """Create and launch the UVCReader thread that feeds the frame pipeline."""
        # Reader geometry/fps come from instance configuration.
        self.reader = UVCReader(camera_name='UVC Camera',
                                width=self.uvc_buf_w,
                                height=self.uvc_buf_h,
                                fps=self.uvc_fps)
        self.reader.start()
        self.is_running = True

    def stop_process_uvc_buffer(self):
        """Stop the reader thread (if any is alive) and clear the running flags."""
        reader = self.reader
        if reader is not None and reader.is_alive():
            reader.stop()
        # Drop the reference regardless of whether a reader existed.
        self.reader = None
        self.is_running = False

    def view_mode_change(self):
        """Toggle between 2D-image and 3D-pointcloud display modes."""
        entering_3d = not self.is_3d
        self.is_3d = entering_3d
        if entering_3d:
            self.pushButton_view_mode.setIcon(self.icon_3d_active)
            # 3D rendering is not implemented yet: warn the user but keep the
            # toggle state so the button cycles consistently.
            QMessageBox.warning(self, 'Warning', 'Current version not support.')
            self.logger.log(f" [Info] Convert to 3D-Points mode, Current version not support.")
        else:
            self.pushButton_view_mode.setIcon(self.icon_3d_inactive)
            self.logger.log(f" [Info] Convert to 2D-Image mode.")

    def lock_and_unlock_view(self):
        """Freeze or resume UI refresh by stopping/starting the frame timer."""
        locking = not self.is_locked
        if locking:
            # Stop the timer so the views are no longer refreshed.
            self.timer.stop()
            self.pushButton_lock.setIcon(self.icon_lock_active)
        else:
            self.timer.start(self.val_fps)
            self.pushButton_lock.setIcon(self.icon_lock_inactive)
        self.is_locked = locking
        self.logger.log(f" [Info] Lock view." if locking else f" [Info] Unlock view.")

    def download_image_and_pointcloud(self):
        """Snapshot current frames and save images + point cloud on a worker thread.

        Prefers a timestamp-matched RGB/depth pair from ``self.matcher``; falls
        back to the most recent individual frames when no match is available.
        The save runs in a ``SaveWorker`` moved onto a fresh ``QThread`` so the
        UI stays responsive; the download button is disabled until the worker
        signals ``finished``.
        """
        # Fallback payload: the latest frames currently held on the instance.
        images_dict = {'rgb': self.rgb_img,
                       'disp': self.disp_img,
                       'depth': self.depth_img,
                       'pse_color': self.pse_color_img,
                       'ir_l': self.ir_l_img,
                       'ir_r': self.ir_r_img,
                       'diff': self.diff_img}
        rgb_data, depth_data = self.matcher.find_best_match()
        if rgb_data is None or depth_data is None:
            self.logger.log_to_file(" [Warn] No matched data available")
        else:
            matched_rgb = rgb_data['img']
            matched_disp = depth_data['disp']
            matched_depth = depth_data['depth']
            matched_pse_color = depth_data['pse_color']
            rgb_timestamp = rgb_data['timestamp']
            depth_timestamp = depth_data['timestamp']

            # presumably timestamps are in milliseconds (per the log text) — TODO confirm
            time_diff = abs(depth_timestamp - rgb_timestamp)
            self.logger.log_to_file(f" [Info] Using matched data with time difference: {time_diff}ms")

            # Replace the fallback payload with the time-matched pair; the IR
            # and diff frames have no matched counterpart, so keep the latest.
            images_dict = {'rgb': matched_rgb,
                           'disp': matched_disp,
                           'depth': matched_depth,
                           'pse_color': matched_pse_color,
                           'ir_l': self.ir_l_img,
                           'ir_r': self.ir_r_img,
                           'diff': self.diff_img}

        # Block re-entry until the worker finishes.
        self.pushButton_download.setEnabled(False)
        self.logger.log(f' [Info] Starting Download images and pointcloud.')

        # NOTE(review): re-assigning self.thread/self.worker drops references to
        # any previous pair; safe here because the button stays disabled until
        # the previous worker has finished.
        self.thread = QThread()
        # Hand the snapshot of image data to the worker.
        self.worker = SaveWorker(images_dict)

        # Move the worker onto the new thread so its slots run there.
        self.worker.moveToThread(self.thread)

        # Wire signals and slots:
        # 1. When the thread starts, run the worker's long-running save task.
        self.thread.started.connect(self.worker.download_image_and_pointcloud)

        # 2. When the worker finishes, tear everything down in order.
        self.worker.finished.connect(self.thread.quit)  # ask the thread's event loop to exit
        self.worker.finished.connect(self.worker.deleteLater)  # clean up the worker object
        self.thread.finished.connect(self.thread.deleteLater)  # clean up the thread after it exits
        self.worker.finished.connect(lambda: self.pushButton_download.setEnabled(True))  # re-enable the button
        self.worker.finished.connect(lambda: self.log_update(" [Info] Success download images and pointcloud."))

        # 3. Forward the worker's log messages to the main-thread log slot.
        self.worker.log_message.connect(self.log_update)

        # Start the thread (fires `started`, which kicks off the save).
        self.thread.start()

    def process_frame_fast(self):
        """Poll one frame from the UVC reader and hand it to the image processor.

        Removed the `frames_received` counter from the original: it was
        incremented but never read anywhere in the method (dead local).
        """
        if self.reader is not None and self.is_running:
            frame_data = self.reader.get_frame()
            if frame_data:
                self.image_processor.add_frame(frame_data)
        else:
            # No active reader: back off briefly to avoid a busy loop.
            time.sleep(0.01)

    def on_processing_rgb_finished(self, result):
        """Slot: consume a processed RGB result dict and refresh the RGB view.

        ``result`` is expected to hold 'rgb'/'ir_l'/'ir_r' (and in
        'rgbd+2ir' mode, 'diff') uint8 RGB ndarrays plus a 'timestamp'
        — assumed from the keys read here; TODO confirm against the
        image processor's output contract.
        """
        self.rgb_img = result.get('rgb', None)
        self.ir_l_img = result.get('ir_l', None)
        self.ir_r_img = result.get('ir_r', None)
        self.timestamp_rgb = result.get('timestamp', 0)

        # Feed the matcher so RGB/depth pairs can be aligned by timestamp.
        if self.rgb_img is not None:
            self.matcher.add_rgb_data(self.rgb_img, self.timestamp_rgb)

        if self.work_mode == 'rgbd+2ir':
            self.diff_img = result.get('diff', None)
            if self.diff_img is None:
                self.logger.log_to_file(" [Warn] Difference image not found.")
                # BUG FIX: guard the fallback — the original indexed
                # self.rgb_img unconditionally and crashed when both the
                # diff and the RGB frame were missing.
                if self.rgb_img is not None:
                    self.diff_img = self.rgb_img[::2, ::2]

        if self.rgb_img is not None:
            if self.work_mode == 'rgbd+2ir' and self.ir_l_img is not None and self.ir_r_img is not None:
                # Debug mode: build a 2x2 mosaic of rgb / diff / ir_l / ir_r,
                # each downsampled 2x and zero-padded to a common size.
                ir_l_ds = self.ir_l_img[::2, ::2]
                ir_r_ds = self.ir_r_img[::2, ::2]
                rgb_ds = self.rgb_img[::2, ::2]
                diff_ds = self.diff_img
                max_height = max(rgb_ds.shape[0], diff_ds.shape[0], ir_l_ds.shape[0], ir_r_ds.shape[0])
                max_width = max(rgb_ds.shape[1], diff_ds.shape[1], ir_l_ds.shape[1], ir_r_ds.shape[1])

                def pad_to_size(img, target_h, target_w):
                    # Zero-pad `img` at bottom/right to (target_h, target_w).
                    h, w = img.shape[:2]
                    if len(img.shape) == 3:
                        padded = np.zeros((target_h, target_w, img.shape[2]), dtype=img.dtype)
                        padded[:h, :w] = img
                    else:
                        padded = np.zeros((target_h, target_w), dtype=img.dtype)
                        padded[:h, :w] = img
                    return padded

                rgb_ds = pad_to_size(rgb_ds, max_height, max_width)
                diff_ds = pad_to_size(diff_ds, max_height, max_width)
                ir_l_ds = pad_to_size(ir_l_ds, max_height, max_width)
                ir_r_ds = pad_to_size(ir_r_ds, max_height, max_width)

                top_row = np.hstack([rgb_ds, diff_ds])
                bottom_row = np.hstack([ir_l_ds, ir_r_ds])
                color_frame = np.vstack([top_row, bottom_row])
            else:
                color_frame = self.rgb_img

            # BUG FIX: pass an explicit bytesPerLine (3 * width). Without it
            # QImage assumes 32-bit-aligned scanlines, which shears the image
            # for widths not divisible by 4. Also force a contiguous buffer,
            # since QImage reads the ndarray memory directly.
            color_frame = np.ascontiguousarray(color_frame)
            h, w = color_frame.shape[:2]
            color_img = QImage(color_frame, w, h, 3 * w, QImage.Format_RGB888)
            if not color_img.isNull():
                # QPixmap.fromImage copies the pixels, so the ndarray's
                # lifetime past this point does not matter.
                pixmap = QPixmap.fromImage(color_img)
                self.image_item_rgb.setPixmap(pixmap)
                self.image_item_rgb.setZValue(0)
                self.show_rgb_frame()

    def on_processing_dp_finished(self, result):
        """Slot: consume a processed disparity/depth result and refresh the depth view.

        ``result`` is expected to carry 'disp', 'depth' and a 'pse_color'
        uint8 RGB ndarray plus a 'timestamp' — assumed from the keys read
        here; TODO confirm against the image processor's output contract.
        """
        self.disp_img = result.get('disp', None)
        self.depth_img = result.get('depth', None)
        self.pse_color_img = result.get('pse_color', None)
        self.timestamp_disp = result.get('timestamp', 0)

        # Feed the matcher so RGB/depth pairs can be aligned by timestamp.
        if self.disp_img is not None:
            self.matcher.add_depth_data(self.disp_img,
                                        self.depth_img,
                                        self.timestamp_disp,
                                        pse_color_depth_img=self.pse_color_img)

        if self.pse_color_img is not None:
            # BUG FIX: pass an explicit bytesPerLine (3 * width). Without it
            # QImage assumes 32-bit-aligned scanlines, which shears the image
            # for widths not divisible by 4. Also force a contiguous buffer,
            # since QImage reads the ndarray memory directly.
            frame = np.ascontiguousarray(self.pse_color_img)
            h, w = frame.shape[:2]
            pse_color_img = QImage(frame, w, h, 3 * w, QImage.Format_RGB888)

            if not pse_color_img.isNull():
                # QPixmap.fromImage copies the pixels; ndarray lifetime is safe.
                pixmap = QPixmap.fromImage(pse_color_img)
                self.image_item_depth.setPixmap(pixmap)
                self.image_item_depth.setZValue(0)
                self.show_depth_frame()

    def show_depth_frame(self):
        """Attach the depth scene/item to the view and rescale the item to fit."""
        # First call: the view has no scene yet, so the item must be added once.
        if not self.view_depth.scene():
            self.scene_depth.addItem(self.image_item_depth)
        self.view_depth.setScene(self.scene_depth)
        self.view_depth.fitInView(self.image_item_depth, Qt.KeepAspectRatio)

    def show_rgb_frame(self):
        """Attach the RGB scene/item to the view and rescale the item to fit."""
        # First call: the view has no scene yet, so the item must be added once.
        if not self.view_rgb.scene():
            self.scene_rgb.addItem(self.image_item_rgb)
        self.view_rgb.setScene(self.scene_rgb)
        self.view_rgb.fitInView(self.image_item_rgb, Qt.KeepAspectRatio)

    def resizeEvent(self, event):
        """Keep both image views fitted to their pixmap items on window resize."""
        super().resizeEvent(event)
        for view, item in ((self.view_rgb, self.image_item_rgb),
                           (self.view_depth, self.image_item_depth)):
            # Only refit views that actually have an image loaded.
            if not item.pixmap().isNull():
                view.fitInView(item, Qt.KeepAspectRatio)

    def closeEvent(self, event):
        """Shut down worker threads before the window closes.

        Stops the UVC reader first, then the image-processing pipeline,
        before delegating to the default close handling.
        """
        self.stop_process_uvc_buffer()
        self.image_processor.stop()
        super().closeEvent(event)
        event.accept()


def main():
    """Configure high-DPI support, create the Qt application, and run the viewer."""
    # High-DPI attributes must be set before the QApplication is constructed.
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)

    # The rounding policy only exists on Qt >= 5.14; guard for older installs.
    if hasattr(Qt, 'HighDpiScaleFactorRoundingPolicy'):
        QApplication.setHighDpiScaleFactorRoundingPolicy(
            Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)

    app = QApplication(sys.argv)
    # Keep a reference so the window is not garbage-collected.
    # NOTE(review): assumes the widget shows itself in __init__ — confirm.
    demo = RMSLViewer()
    # BUG FIX: propagate Qt's exit status to the OS instead of discarding it.
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()