"""
!/usr/bin/env python
-*- coding: utf-8 -*-
@CreateTime : 2025/8/16 14:25
@Author  :    AnimateX
@Contact :    animatex@163.com
@File    :    rmsl_viewer_release.py
@License :    Copyright © 2025 AnimateX. All rights reserved.
@Version :    rmsl_viewer_release_2025/8/16.0.1

-------------------------------------------------------------------------------
# @Description:

    ---------------------------------------------------------------------------
    [Update History]:
        2025/8/16: v1.1
        2025/8/21: v1.2
        2025/9/8:  v1.3  新增点击弹出颜色信息或者深度信息

-------------------------------------------------------------------------------
"""
from PyQt5.QtGui import QPixmap, QImage, QPainter, QIcon, QColor
from PyQt5.QtCore import QObject, QTimer, Qt, pyqtSignal, QThread, pyqtSlot, QEvent
from PyQt5.QtWidgets import (QWidget, QGraphicsScene, QGraphicsView, QApplication,
                             QMessageBox, QGraphicsPixmapItem, QFileDialog, QToolTip)

import os
import cv2
import sys
import time
import json
import struct
import queue
import threading
import subprocess
import numpy as np
from pathlib import Path
from collections import deque
from datetime import datetime
from typing import Tuple, Dict, Optional, Union, Any

from viewer_rc import Ui_RMSLViewer


def get_project_root():
    """Return the directory holding this script.

    Falls back to the launch script's directory when ``__file__`` is absent
    (e.g. when running under an embedded/frozen interpreter).
    """
    if "__file__" not in globals():
        return Path(sys.argv[0]).resolve().parent
    return Path(__file__).resolve().parent


# Resolve all bundled assets relative to this script's own directory so the
# viewer works regardless of the current working directory.
PROJECT_ROOT = get_project_root()
# Log file written alongside the script/executable.
RMSL_LOG_PATH = PROJECT_ROOT / "viewer.log"
# Bundled ADB executable (Windows .exe shipped next to the viewer).
ADB = str(PROJECT_ROOT / "adb/adb.exe")

# Qt stylesheet applied to QMessageBox dialogs: light grey background, YaHei
# font stack, and flat blue push buttons with hover/pressed states.
MessageBox_Style = """
    QMessageBox {
        background-color: #F5F5F5;
        font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
        font-size: 10px;
        padding: 10px;
        spacing: 8px;
    }
    QMessageBox QLabel {
        color: #333333;
        font-size: 12px;
        min-width: 0px;
        margin-left: 0px;
        padding-left: 0px;
    }
    QMessageBox::icon-label {
        min-width: 32px;
        min-height: 32px;
        margin-right: 0px;
        padding: 0px;
        qproperty-alignment: AlignCenter;
    }
    QMessageBox QPushButton {
        background-color: #4A90E2;
        font-family: 'YaHei Consolas Hybrid', 'Microsoft YaHei', sans-serif;
        font-size: 12px;
        color: white;
        border-radius: 4px;
        padding: 5px 15px;
        outline: none;
        border: none;
        min-width: 80px;
        min-height: 25px;
    }
    QMessageBox QPushButton:hover {
        background-color: #5A9AE8;
    }
    QMessageBox QPushButton:pressed {
        background-color: #3A80D2;
    }
"""


class Logger(QObject):
    """Qt-aware logger that mirrors messages to a signal and an optional file.

    ``log`` fans a message out to ``log_signal`` (for UI listeners) and the
    log file; ``log_to_file`` writes to the file only.
    """

    log_signal = pyqtSignal(str)

    def __init__(self, log_file_path=None):
        super().__init__()
        self.log_file_path = log_file_path
        # File logging is active only when a path was supplied.
        self.file_logging_enabled = log_file_path is not None
        # True when the next write starts a fresh line (gets a timestamp).
        self._current_line_start = True

    def _write_to_file(self, msg, with_newline=True, with_timestamp=True):
        """Append *msg* to the log file, timestamping the start of each line."""
        if not self.file_logging_enabled:
            return

        try:
            with open(self.log_file_path, 'a', encoding='utf-8') as f:
                if with_timestamp and self._current_line_start:
                    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    f.write(f"[{timestamp}] ")

                f.write(msg)

                if with_newline:
                    f.write('\n')
                    self._current_line_start = True
                else:
                    self._current_line_start = False

                f.flush()
        except Exception as e:
            # Bug fix: the original built this error string and silently
            # discarded it (dead assignment). Report the failure on stderr
            # instead of swallowing it.
            print(f" [Error] Can not open log file. Error info: {e}", file=sys.stderr)

    def log(self, msg):
        """Emit *msg* to Qt listeners and append it to the log file."""
        self.log_signal.emit(msg)
        self._write_to_file(msg, with_newline=True)

    def log_to_file(self, msg):
        """Append *msg* to the log file only (no Qt signal)."""
        self._write_to_file(msg, with_newline=True)

# Process-wide singleton Logger instance, created lazily by init_logger().
logger = None


def init_logger(log_file_path=None):
    """Return the shared Logger, constructing it on first use.

    The *log_file_path* argument only takes effect on the very first call;
    subsequent calls return the already-created instance unchanged.
    """
    global logger
    if logger is not None:
        return logger
    logger = Logger(log_file_path)
    return logger


class SensorDataMatcher:
    """Pairs RGB frames with depth frames by nearest-earlier timestamp.

    Both streams are buffered in bounded deques; matching pops the newest
    depth frame and scans the RGB buffer newest-first for the closest RGB
    frame that is not newer than it and within ``max_time_diff``.
    """

    def __init__(self, max_time_diff=200, max_queue_size=20):
        # Bounded FIFO buffers; the oldest entry is dropped once full.
        self.rgb_buffer = deque()
        self.depth_buffer = deque()
        # Largest accepted depth-vs-RGB timestamp gap (presumably milliseconds
        # — confirm against the timestamps the producers pass in).
        self.max_time_diff = max_time_diff
        self.max_queue_size = max_queue_size
        # Guards both buffers: add_* and find_best_match may run on
        # different threads.
        self.lock = threading.Lock()
        self.logger = init_logger(RMSL_LOG_PATH)

    def add_rgb_data(self, img_rgb, timestamp):
        """Buffer one RGB frame together with its capture timestamp."""
        with self.lock:
            self.rgb_buffer.append({
                'img': img_rgb,
                'timestamp': timestamp
            })
            if len(self.rgb_buffer) > self.max_queue_size:
                self.rgb_buffer.popleft()

    def add_depth_data(self, img_disp, img_depth, timestamp, pse_color_depth_img=None):
        """Buffer one depth frame (disparity, depth, optional pseudo-color)."""
        with self.lock:
            self.depth_buffer.append({
                'disp': img_disp,
                'depth': img_depth,
                'pse_color': pse_color_depth_img,
                'timestamp': timestamp
            })
            if len(self.depth_buffer) > self.max_queue_size:
                self.depth_buffer.popleft()

    def find_best_match(self):
        """Return (rgb_entry, depth_entry) for the newest depth frame, else (None, None).

        Pops the newest depth frame, then searches the RGB buffer newest-first
        for the closest RGB frame not newer than it. On success the matched
        RGB entry is removed from its buffer; on failure the depth frame is
        pushed back so a later RGB frame may still match it.
        """
        with self.lock:
            if not self.rgb_buffer or not self.depth_buffer:
                self.logger.log_to_file(f" ⚠️ [Warn] Frame buffer empty!")
                return None, None

            latest_depth = self.depth_buffer.pop()
            target_timestamp = latest_depth['timestamp']

            best_rgb = None
            best_rgb_index = -1
            min_time_diff = float('inf')

            for i, rgb_data in enumerate(reversed(self.rgb_buffer)):
                rgb_timestamp = rgb_data['timestamp']

                # Scanning newest→oldest the gap only grows, so stop once we
                # are far beyond the acceptance window.
                if target_timestamp - rgb_timestamp > self.max_time_diff * 2:
                    break

                if rgb_timestamp <= target_timestamp:
                    time_diff = target_timestamp - rgb_timestamp
                    if time_diff < min_time_diff:
                        min_time_diff = time_diff
                        best_rgb = rgb_data
                        # Convert the reversed-iteration index back into a
                        # left-to-right buffer index for deletion below.
                        best_rgb_index = len(self.rgb_buffer) - 1 - i

                        # Near-exact match; no earlier frame can be closer.
                        if time_diff < 5:
                            break

            if best_rgb and min_time_diff <= self.max_time_diff:
                if 0 <= best_rgb_index < len(self.rgb_buffer):
                    del self.rgb_buffer[best_rgb_index]

                return best_rgb, latest_depth
            else:
                self.depth_buffer.append(latest_depth)
                self.logger.log_to_file(f" 🚨 [Error] Failed find depth matched rgb.")
                return None, None


class UVCReader(threading.Thread):
    """Daemon thread that streams raw frames from a UVC camera via FFMPEG.

    FFMPEG is spawned as a subprocess writing raw video to stdout; complete
    frames of ``width * height * 2`` bytes (2 bytes/pixel for packed YUYV
    4:2:2) are appended to a bounded deque that drops the oldest frame when
    full. Consumers pull frames with :meth:`get_frame`.
    """

    def __init__(self,
                 camera_name: str = 'UVC Camera',
                 width: int = 1280,
                 height: int = 1080,
                 fps: int = 30,
                 pixel_format: str = 'yuyv422',
                 buffer_size: int = 5,
                 ffmpeg_path: str = './ffmpeg/ffmpeg.exe'):
        super().__init__()
        self.logger = init_logger(RMSL_LOG_PATH)
        self.daemon = True

        self.width = width
        self.height = height
        self.pixel_format = pixel_format
        # 2 bytes per pixel for packed YUYV 4:2:2.
        self.frame_size = width * height * 2
        # Bounded buffer: newest frames win, oldest are silently dropped.
        self.buffer = deque(maxlen=buffer_size)
        self._stop_event = threading.Event()

        self.ffmpeg_command = [
            ffmpeg_path,
            '-f', 'dshow',
            '-rtbufsize', '300M',
            '-video_size', f'{self.width}x{self.height}',
            '-framerate', str(fps),
            '-pixel_format', self.pixel_format,
            '-i', f'video={camera_name}',
            '-c:v', 'rawvideo',
            '-f', 'image2pipe',
            '-'
        ]

        self.process = None

    def run(self):
        """Read fixed-size frames from FFMPEG stdout until stopped."""
        try:
            self.process = subprocess.Popen(
                self.ffmpeg_command,
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL
            )

            self.logger.log_to_file(" 🎉 [Info] UVC reader thread started.")

            while not self._stop_event.is_set():
                chunk = self.process.stdout.read(self.frame_size)

                if len(chunk) == self.frame_size:
                    self.buffer.append(chunk)
                elif self.process.poll() is not None:
                    self.logger.log_to_file(" ⚠️ [Warn] FFMPEG process has terminated.")
                    break
                else:
                    self.logger.log_to_file(" 💥 [Error] Incomplete frame data received, might be end of stream.")
                    time.sleep(0.01)

        except FileNotFoundError:
            self.logger.log_to_file(
                f" 💥 [Error] FFMPEG executable not found at '{self.ffmpeg_command[0]}'. Please check the path.")
        except Exception as e:
            self.logger.log_to_file(f" 💥 [Error] An exception occurred in the reader thread: {e}")
            # Bug fix: stderr is redirected to DEVNULL above, so
            # self.process.stderr is None; the original unconditionally called
            # .read() on it here and raised AttributeError inside the handler.
            if self.process and self.process.stderr:
                self.logger.log_to_file(f" 💥 [Error] FFMPEG stderr: {self.process.stderr.read().decode()}")
        finally:
            # Ensure the child process is reaped even on error paths.
            if self.process and self.process.poll() is None:
                self.process.terminate()
                self.process.wait()

    def get_frame(self):
        """Pop and return the oldest buffered frame, or None when empty."""
        try:
            return self.buffer.popleft()
        except IndexError:
            return None

    def stop(self):
        """Signal the reader loop to exit and wait up to 5 s for it to join."""
        self._stop_event.set()
        self.join(timeout=5)
        self.logger.log_to_file(f" 🎉 [Info] UVC reader thread stopped.\n")


class Colorizer:
    """Maps depth images to 8-bit BGR color via a precomputed lookup table."""

    def __init__(self,
                 color_map: Optional[Dict[float, np.ndarray]] = None,
                 lut_size: int = 65536,
                 mode='jet_red2blue'):
        # Fall back to the built-in jet map when no custom map is supplied.
        self.color_map = color_map if color_map else Colorizer.default_jet_map(mode=mode)
        self.lut_size = lut_size
        self.lut = None
        self._build_lut()

    @staticmethod
    def default_jet_map(mode='jet_red2blue'):
        """Return the default 7-stop jet color map for the requested direction."""
        stops = (0.00, 0.11, 0.35, 0.50, 0.64, 0.86, 1.00)
        colors = (
            np.array([0.5, 0, 0]),   # dark red
            np.array([1, 0, 0]),     # red
            np.array([1, 1, 0]),     # yellow
            np.array([0, 1, 0]),     # green
            np.array([0, 1, 1]),     # cyan
            np.array([0, 0, 1]),     # blue
            np.array([0, 0, 0.5]),   # dark blue
        )
        if mode == 'jet_red2blue':
            return dict(zip(stops, colors))
        # Reversing the color sequence yields the blue→red variant.
        return dict(zip(stops, reversed(colors)))

    def _build_lut(self):
        """Precompute the lut_size×3 table by linear interpolation of the stops."""
        anchor_pos = np.array(sorted(self.color_map))
        anchor_rgb = np.array([self.color_map[p] for p in anchor_pos])
        samples = np.linspace(0, 1, self.lut_size)

        table = np.zeros((self.lut_size, 3), dtype=np.float32)
        for ch in range(3):
            table[:, ch] = np.interp(samples, anchor_pos, anchor_rgb[:, ch])
        self.lut = table

    def set_colormap(self, colormap: Dict[float, np.ndarray]):
        """Replace the color map and rebuild the lookup table."""
        self.color_map = colormap
        self._build_lut()

    def colorize(self,
                 depth_image: np.ndarray,
                 depth_range: Optional[Tuple[float, float]] = None,
                 normalize: bool = True) -> np.ndarray:
        """Convert a depth image to an 8-bit BGR visualization.

        When *depth_range* is given it fixes the (min, max) mapping window;
        otherwise the window comes from the image itself (ignoring zero
        pixels when *normalize* is True).
        """
        if not isinstance(depth_image, np.ndarray):
            depth_image = np.array(depth_image)

        if depth_range is not None:
            lo, hi = depth_range
        elif normalize:
            nonzero = depth_image > 0
            if np.any(nonzero):
                lo = np.min(depth_image[nonzero])
                hi = np.max(depth_image[nonzero])
            else:
                lo, hi = 0, 1
        else:
            lo = np.min(depth_image)
            hi = np.max(depth_image)

        # Guard against a degenerate (flat) window.
        if hi == lo:
            hi = lo + 1

        scaled = (depth_image.astype(np.float32) - lo) / (hi - lo)
        scaled = np.clip(scaled, 0, 1)
        indices = (scaled * (self.lut_size - 1)).astype(np.int32)
        rgb = self.lut[indices]
        # Reverse the channel axis (RGB→BGR) and expand to 8-bit.
        return (rgb[:, :, ::-1] * 255).astype(np.uint8)


class UtilsBasic(object):
    """Shared viewer helpers: config I/O, depth conversion, point-cloud
    generation/export (PLY), and ADB-based file transfer."""

    def __init__(self):
        self.logger = init_logger(RMSL_LOG_PATH)

    def read_json(self, json_path: str) -> dict:
        """Load and return a JSON config file as a dict.

        Raises FileNotFoundError when the file is missing, and KeyError when
        parsing fails (KeyError is kept for backward compatibility with
        existing callers, although ValueError would describe it better).
        """
        if not os.path.exists(json_path):
            self.logger.log_to_file(f" 🚨 [Error] config file: {json_path} not exits!")
            raise FileNotFoundError(f" 🚨 [Error] config file: {json_path} not exits!")
        try:
            with open(json_path, 'r', encoding='utf-8') as file:
                config = json.load(file)
            return config
        except json.JSONDecodeError as e:
            self.logger.log_to_file(f" 🚨 [Error] Failed load config file: {json_path}, error log: {e}")
            raise KeyError(f" 🚨 [Error] Failed load config file: {json_path}, error log: {e}")

    def save_path_check(self, save_path: str):
        """Create *save_path* (including parents) if it does not exist yet."""
        if not os.path.exists(save_path):
            os.makedirs(save_path)
            self.logger.log_to_file(f" 🎉 [Info] Success create save directory: {save_path}!")

    @staticmethod
    def get_formatted_timestamp():
        """Return the current local time formatted as 'YYYY-MM-DD_HH-MM-SS'."""
        formatted_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

        return formatted_time

    def disp_to_depth_vectorized(self,
                                 disp: np.ndarray,
                                 Q23: float,
                                 Q32: float,
                                 Q33: float = 0.0,
                                 subpixel_value: int = 64,
                                 zoom_ratio: float = 1.0) -> Optional[np.ndarray]:
        """Convert a disparity image to a uint16 depth image.

        depth = Q23 * zoom_ratio * subpixel_value
                / (Q32 * disp + Q33 * subpixel_value)
        Pixels with zero disparity (or a zero denominator) map to depth 0.
        Returns None when *disp* is None or not an ndarray.
        """
        if disp is None:
            self.logger.log_to_file(f" 🚨 [Error] Check disp image, not initial.")
            return None

        if not isinstance(disp, np.ndarray):
            self.logger.log_to_file(f" 🚨 [Error] Check disp image input, Invalid image.")
            return None

        depth = np.zeros_like(disp, dtype=np.uint16)
        valid_mask = disp != 0

        if np.any(valid_mask):
            disp_float = disp.astype(np.float32)
            denominator = Q32 * disp_float + Q33 * subpixel_value
            valid_denominator = (denominator != 0) & valid_mask

            if np.any(valid_denominator):
                depth_float = np.zeros_like(disp_float)
                depth_float[valid_denominator] = (Q23 * zoom_ratio * subpixel_value /
                                                  denominator[valid_denominator])

                # Round, clamp to the uint16 range, then convert.
                depth_float = np.clip(depth_float, 0, 65535)
                depth = np.round(depth_float).astype(np.uint16)
                depth[~valid_mask] = 0

        return depth

    def generate_pointcloud_by_depth(self,
                                     img_depth,
                                     fx, fy, cx, cy,
                                     img_h, img_w,
                                     min_dis, max_dis,
                                     en_valid_roi=False,
                                     en_cen_roi=False):
        """Back-project a depth image into a point cloud with a pinhole model.

        Returns (point_cloud_show, point_cloud_save, zero_val_cnt): the "show"
        cloud negates Y and Z for display, the "save" cloud flips them back to
        the camera convention. en_valid_roi crops a 5% border first (25% when
        en_cen_roi is also set). Only pixels in [min_dis, max_dis] are kept.
        """
        valid_mask = (img_depth >= min_dis) & (img_depth <= max_dis)

        zero_val_cnt = np.sum(img_depth == 0)

        if en_valid_roi:
            if en_cen_roi:
                margin_w = int(0.25 * img_w)
                margin_h = int(0.25 * img_h)
            else:
                margin_w = int(0.05 * img_w)
                margin_h = int(0.05 * img_h)

            start_u, end_u = margin_w, img_w - margin_w
            start_v, end_v = margin_h, img_h - margin_h

            u, v = np.meshgrid(np.arange(start_u, end_u), np.arange(start_v, end_v))

            x_data = (u - cx) * img_depth[start_v: end_v, start_u: end_u] / fx
            y_data = - (v - cy) * img_depth[start_v: end_v, start_u: end_u] / fy
            z_data = - img_depth[start_v: end_v, start_u: end_u]

            valid_x_data = x_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_y_data = y_data[valid_mask[start_v:end_v, start_u:end_u]]
            valid_z_data = z_data[valid_mask[start_v:end_v, start_u:end_u]]

            point_cloud_show = np.column_stack((valid_x_data, valid_y_data, valid_z_data))
            point_cloud_save = np.column_stack((valid_x_data, - valid_y_data, - valid_z_data))
        else:
            # Global point cloud: project every in-range pixel.
            u, v = np.meshgrid(np.arange(0, img_w), np.arange(0, img_h))

            x_data = (u - cx) * img_depth / fx
            y_data = - (v - cy) * img_depth / fy
            z_data = - img_depth

            valid_x_data = x_data[valid_mask]
            valid_y_data = y_data[valid_mask]
            valid_z_data = z_data[valid_mask]

            point_cloud_show = np.column_stack((valid_x_data, valid_y_data, valid_z_data))
            point_cloud_save = np.column_stack((valid_x_data, - valid_y_data, - valid_z_data))

        self.logger.log_to_file(f" 🎉 [Info] Success generate pointcloud, zero points count: {zero_val_cnt}")

        return point_cloud_show, point_cloud_save, zero_val_cnt

    def generate_rgb_pointcloud(self, rgb, depth, rgbCamParam: dict, depth_ratio: float = 1.0, dis_near: int = 100,
                                dis_far: int = 3000):
        """Build an Nx6 (x, y, z, r, g, b) cloud from aligned RGB and depth.

        *rgbCamParam* must provide 'fx', 'fy', 'cx', 'cy'. Depth values are
        filtered to [dis_near, dis_far] (scaled by *depth_ratio*) and
        converted back to metric units via 1 / depth_ratio.
        """
        fx = rgbCamParam['fx']
        fy = rgbCamParam['fy']
        cx = rgbCamParam['cx']
        cy = rgbCamParam['cy']

        dis_near *= depth_ratio
        dis_far *= depth_ratio

        height, width = depth.shape
        ratio = 1 / depth_ratio

        # Create meshgrid for pixel coordinates
        x = np.arange(width)
        y = np.arange(height)
        xv, yv = np.meshgrid(x, y)

        # Flatten the arrays for easier processing
        xv = xv.flatten()
        yv = yv.flatten()
        depth_flat = depth.flatten()

        # Filter points based on depth range
        valid = (depth_flat >= dis_near) & (depth_flat <= dis_far)
        xv = xv[valid]
        yv = yv[valid]
        depth_flat = depth_flat[valid]

        z = depth_flat * ratio
        x = (xv - cx) * z / fx
        y = (yv - cy) * z / fy

        # Correct RGB reading order
        rgb_flat = rgb.reshape(-1, 3)
        rgb_flat = rgb_flat[valid]

        # Create the point cloud array
        points = np.zeros((len(z), 6), dtype=np.float32)
        points[:, 0] = x
        points[:, 1] = y
        points[:, 2] = z
        points[:, 3] = rgb_flat[:, 0]  # r
        points[:, 4] = rgb_flat[:, 1]  # g
        points[:, 5] = rgb_flat[:, 2]  # b

        self.logger.log_to_file(f" 🎉 [Info] Success generate rgb pointcloud.")

        return points

    def save_ply_ascii_vectorized(self, point_cloud_data, filename):
        """Write an Nx3 point cloud to *filename* as an ASCII PLY file."""
        n_points = len(point_cloud_data)

        # Bug fix: the original triple-quoted header carried the source-code
        # indentation into the output, producing a malformed PLY header.
        header_lines = [
            "ply",
            "format ascii 1.0",
            f"element vertex {n_points}",
            "property float x",
            "property float y",
            "property float z",
            "end_header",
        ]
        header = "\n".join(header_lines) + "\n"

        # List comprehension + join is much faster than per-point writes.
        data_lines = [f"{point[0]:.6f} {point[1]:.6f} {point[2]:.6f}" for point in point_cloud_data]
        data_content = '\n'.join(data_lines)

        with open(filename, 'w') as f:
            f.write(header + data_content)

        self.logger.log_to_file(f" 🎉 [Info] Success save ascii vectorized pointcloud")

    def save_ply_binary_rgb(self, point_cloud_data, filename):
        """Write an Nx6 (x, y, z, r, g, b) cloud as a binary little-endian PLY."""
        if point_cloud_data.shape[1] != 6:
            self.logger.log_to_file(f" 💥 [Error] Input data size: [Nx6](x, y, z, r, g, b)")
            raise ValueError(f" 💥 [Error] Input data size: [Nx6](x, y, z, r, g, b)")

        n_points = len(point_cloud_data)

        with open(filename, 'wb') as f:
            # Write the PLY header.
            header_lines = [
                "ply",
                "format binary_little_endian 1.0",
                f"element vertex {n_points}",
                "property float x",
                "property float y",
                "property float z",
                "property uchar red",
                "property uchar green",
                "property uchar blue",
                "end_header",
            ]
            header = "\n".join(header_lines) + "\n"
            f.write(header.encode('ascii'))

            # Structured array matching the binary vertex layout exactly.
            vertices = np.zeros(n_points, dtype=[
                ('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                ('r', 'u1'), ('g', 'u1'), ('b', 'u1')
            ])

            # Coordinates.
            xyz = point_cloud_data[:, :3].astype(np.float32)
            vertices['x'] = xyz[:, 0]
            vertices['y'] = xyz[:, 1]
            vertices['z'] = xyz[:, 2]

            # Colors: accept either normalized [0, 1] or byte [0, 255] input.
            rgb_data = point_cloud_data[:, 3:6]

            if rgb_data.max() <= 1.0:
                # Normalized range — scale up to 0-255.
                rgb = (rgb_data * 255).astype(np.uint8)
            else:
                # Already in byte range.
                rgb = rgb_data.astype(np.uint8)

            # Keep RGB channel order (do not swap to BGR!).
            vertices['r'] = rgb[:, 0]  # red   = column 0
            vertices['g'] = rgb[:, 1]  # green = column 1
            vertices['b'] = rgb[:, 2]  # blue  = column 2

            f.write(vertices.tobytes())

        self.logger.log_to_file(f" 🎉 [Info] Success save rgb-pointcloud(binary fast)")

    def save_ply_binary_fast(self, point_cloud_data, filename):
        """Write an Nx3 cloud as a binary little-endian PLY (fast path)."""
        n_points = len(point_cloud_data)

        with open(filename, 'wb') as f:
            header = f"ply\nformat binary_little_endian 1.0\nelement vertex {n_points}\n"
            header += "property float x\nproperty float y\nproperty float z\nend_header\n"
            f.write(header.encode('ascii'))

            # tobytes() requires a C-contiguous float32 buffer.
            point_cloud_float32 = point_cloud_data.astype(np.float32, copy=False)
            if not point_cloud_float32.flags.c_contiguous:
                point_cloud_float32 = np.ascontiguousarray(point_cloud_float32)

            f.write(point_cloud_float32.tobytes())

        self.logger.log_to_file(f" 🎉 [Info] Success save pointcloud(binary fast)")

    def save_pointcloud_fast(self, point_cloud_save, filename="pointcloud.ply"):
        """Save a cloud as PLY: binary for large clouds, ASCII for small ones."""
        if len(point_cloud_save) > 50000:
            self.save_ply_binary_fast(point_cloud_save, filename)
        else:
            self.save_ply_ascii_vectorized(point_cloud_save, filename)

    def is_file_on_device(self, device_path: str, device_serial: Optional[str] = None) -> bool:
        """Return True when *device_path* exists on the connected ADB device.

        Uses `test -e ...; echo $?` so the inner exit code is readable from
        stdout regardless of the adb shell's own return code handling.
        """
        shell_command = f'test -e "{device_path}"; echo $?'

        adb_command = [ADB]
        if device_serial:
            adb_command.extend(['-s', device_serial])
        adb_command.extend(['shell', shell_command])

        try:
            result = subprocess.run(
                adb_command,
                capture_output=True,
                text=True,
                check=False
            )

            if result.returncode != 0:
                self.logger.log_to_file(f" 🚨 [Error] Failed operate ADB cmd. Return Code: {result.returncode}")
                if result.stderr:
                    self.logger.log_to_file(f" 🚨 [Error] ADB Error info: {result.stderr.strip()}")
                return False

            try:
                inner_exit_code = int(result.stdout.strip())
                # `test -e` exits 0 when the path exists.
                return inner_exit_code == 0
            except (ValueError, IndexError):
                self.logger.log_to_file(
                    f' 🚨 [Error] ADB exception, stdout: {result.stdout.strip()}, stderr: {result.stderr.strip()}')
                return False

        except FileNotFoundError:
            self.logger.log_to_file(f' 🚨 [Error] Can not find adb command.')
            return False
        except Exception as e:
            self.logger.log_to_file(f' 🚨 [Error] ADB Unknown error: {e}.')
            return False

    def pull_file_from_single_device(self, calib_yaml_evb_path: str, calib_yaml_path: str) -> bool:
        """Wait for an ADB device, then pull a file from it to the local path.

        Any stale local copy is removed first. Returns True on success.
        """
        self.logger.log_to_file(" 🎉 [Info] --- Starting device wait and file pull process ---")
        if os.path.exists(calib_yaml_path):
            # Remove the stale cached copy before pulling a fresh one.
            os.remove(calib_yaml_path)

        try:
            self.logger.log_to_file(" ⏳ [Info] Waiting for a device to be connected and ready...")

            subprocess.run(
                [ADB, 'wait-for-device'],
                check=True,
                capture_output=True
            )

            self.logger.log_to_file(" ✅ [Info] Device detected and ready!")

            pull_command = [ADB, 'pull', calib_yaml_evb_path, calib_yaml_path]
            subprocess.run(
                pull_command,
                capture_output=True,
                text=True,
                check=True
            )

            self.logger.log_to_file(" 🎉 [Info] Success! File has been pulled successfully.")
            return True
        except FileNotFoundError:
            self.logger.log_to_file(" 💥 [Error] Critical Error: 'adb' command not found.")
            self.logger.log_to_file(" 💥 [Error] Please ensure ADB is installed and in your system's PATH.")
            return False
        except subprocess.CalledProcessError as e:
            error_message = e.stderr.strip()
            if "wait-for-device" in str(e.args):
                self.logger.log_to_file(f" 🚨 [Error] An error occurred while waiting for the device: {error_message}")
            else:
                self.logger.log_to_file(f" 🚨 [Error] Error during 'adb pull': {error_message}")
                self.logger.log_to_file(f" 🚨 [Error]   Please check the following:")
                self.logger.log_to_file(f" 🚨 [Error]   1. Is the remote file path '{calib_yaml_evb_path}' correct?")
                self.logger.log_to_file(f" 🚨 [Error]   2. Do you have read permissions for that file on the device?")
            return False
        except Exception as e:
            self.logger.log_to_file(f" 🚨 [Error] An unexpected error occurred: {e}")
            return False

    def save_file(self, img: np.ndarray, save_dir: str, prefix: str, timestamp: str, en_rgb2bgr: bool = False):
        """Save *img* as '<prefix>_<timestamp>.png' in *save_dir*.

        Bug fix: the original declared `en_rgb2bgr: False`, using the value
        False as a (meaningless) annotation and leaving the flag without a
        default; it is now a proper `bool = False` keyword default.
        """
        if img is not None:
            img_name = f'{prefix}_{timestamp}.png'
            img_path = os.path.join(save_dir, img_name)
            if en_rgb2bgr:
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            cv2.imwrite(img_path, img)
            self.logger.log_to_file(f' 🎉 [Info] Downloaded {prefix} image to dir: {save_dir}')


class CalibrationReader:
    """Context-manager wrapper around cv2.FileStorage for calibration YAMLs."""

    def __init__(self, yaml_path):
        self.logger = init_logger(RMSL_LOG_PATH)
        self.fs = cv2.FileStorage(yaml_path, cv2.FILE_STORAGE_READ)
        if not self.fs.isOpened():
            self.logger.log_to_file(f" 🚨 [Error] Cant not open calib file: {yaml_path}")
            raise ValueError(f" 🚨 [Error] Can not open calib file: {yaml_path}")

    def read_scalar(self, key, dtype=int, default=None):
        """Read a scalar node cast per *dtype* (int/float/str); *default* if absent."""
        node = self.fs.getNode(key)
        if node.empty():
            return default
        if dtype == int:
            return int(node.real())
        if dtype == float:
            return node.real()
        return node.string()

    def read_matrix(self, key, default=None):
        """Read a matrix node as a numpy array; *default* if absent."""
        node = self.fs.getNode(key)
        return default if node.empty() else node.mat()

    def read_calibration(self, required_keys=None, optional_keys=None):
        """Read required and optional calibration entries into one dict.

        Required keys raise ValueError when missing; optional keys fall back
        to their (dtype, default) pairs, with dtype=None meaning matrix.
        """
        if required_keys is None:
            required_keys = ["l_width", "l_height", "K_l", "D_l", "Q"]

        if optional_keys is None:
            optional_keys = {
                "r_width": (int, 640),
                "r_height": (int, 352),
                "rgb_width": (int, 1280),
                "rgb_height": (int, 1080),
                "R1": (None, None),
                "R2": (None, None),
                "P1": (None, None),
                "P2": (None, None),
                "K_rgb": (None, None),
                "D_rgb": (None, None),
                "R_ir2rgb": (None, None),
                "T_ir2rgb": (None, None),
                "invPR1": (None, None)
            }

        # Keys that are stored as matrices rather than scalars.
        matrix_keys = {"K_l", "K_r", "D_l", "D_r", "R", "T", "Q", "R1", "R2", "P1", "P2"}

        intrinsic_info = {}
        for key in required_keys:
            value = self.read_matrix(key) if key in matrix_keys else self.read_scalar(key)
            if value is None:
                self.logger.log_to_file(f"🚨 [Error] Core key: {key} not exists!")
                raise ValueError(f"🚨 [Error] Core key: '{key}' not exists!")
            intrinsic_info[key] = value

        for key, (dtype, default) in optional_keys.items():
            if dtype is None:
                intrinsic_info[key] = self.read_matrix(key, default)
            else:
                intrinsic_info[key] = self.read_scalar(key, dtype, default)

        return intrinsic_info

    def close(self):
        """Release the underlying FileStorage handle if still open."""
        if self.fs.isOpened():
            self.fs.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()


class ParserCamIntrinsic(object):
    """Parses a calibration YAML into a flat dict of camera intrinsics."""

    def __init__(self, intrinsic_yaml_path: str = './config/calibration.yaml'):
        self.logger = init_logger(RMSL_LOG_PATH)
        self.intrinsic_yaml_path = intrinsic_yaml_path
        if not os.path.exists(intrinsic_yaml_path):
            self.logger.log_to_file(f" 🚨 [Error] Check camera intrinsic yaml path: {intrinsic_yaml_path}")
            raise FileNotFoundError(f" 🚨 [Error] Check camera intrinsic yaml path: {intrinsic_yaml_path}")

    @staticmethod
    def _unpack_distortion(dist_mat):
        """Flatten a 12-term distortion matrix into named float coefficients."""
        flat = dist_mat.flatten()
        names = ('k1', 'k2', 'p1', 'p2', 'k3', 'k4',
                 'k5', 'k6', 's1', 's2', 's3', 's4')
        return {name: float(flat[idx]) for idx, name in enumerate(names)}

    def get_all_camera_intrinsic(self):
        """Load the YAML and flatten every intrinsic the viewer needs.

        Returns a dict with left-camera intrinsics/distortion, Q-matrix terms,
        and — for RGBD modules — RGB intrinsics plus the IR→RGB projection
        coefficients (r0..r8, t0..t2). 'category' is 'rgbd' or '2rgb'.
        """
        calib_info = dict()

        with CalibrationReader(self.intrinsic_yaml_path) as reader:
            intrinsic_info = reader.read_calibration()

            calib_info['l_width'] = intrinsic_info['l_width']
            calib_info['l_height'] = intrinsic_info['l_height']

            # TODO: for dual-RGB modules `l_intrinsic` holds the calibrated
            # RGB module; for RGBD modules it holds the left IR camera.
            K_l = intrinsic_info['K_l']
            calib_info['l_intrinsic'] = {
                'fx': float(K_l[0, 0]),
                'fy': float(K_l[1, 1]),
                'cx': float(K_l[0, 2]),
                'cy': float(K_l[1, 2]),
                'K_l': K_l
            }

            calib_info['l_distortion'] = self._unpack_distortion(intrinsic_info['D_l'])
            calib_info['l_distortion']['D_l'] = intrinsic_info['D_l']

            calib_info['R1'] = intrinsic_info['R1']
            calib_info['P1'] = intrinsic_info['P1']

            # Q-matrix terms used by disparity→depth conversion.
            Q = intrinsic_info['Q']
            calib_info['q23'] = Q[2, 3]
            calib_info['q32'] = Q[3, 2]
            calib_info['q33'] = Q[3, 3]

            calib_info['rgb_width'] = intrinsic_info['rgb_width']
            calib_info['rgb_height'] = intrinsic_info['rgb_height']

            K_rgb = intrinsic_info['K_rgb']
            if K_rgb is None:
                # Dual-RGB module: no IR→RGB alignment data available.
                calib_info['rgb_intrinsic'] = None
                calib_info['rgb_distortion'] = None
                calib_info['r_coeff'] = None
                calib_info['t_coeff'] = None
                calib_info['category'] = '2rgb'
                return calib_info

            calib_info['rgb_intrinsic'] = {
                'fx': float(K_rgb[0, 0]),
                'fy': float(K_rgb[1, 1]),
                'cx': float(K_rgb[0, 2]),
                'cy': float(K_rgb[1, 2]),
                'K': K_rgb
            }

            calib_info['rgb_distortion'] = self._unpack_distortion(intrinsic_info['D_rgb'])
            calib_info['rgb_distortion']['D'] = intrinsic_info['D_rgb']

            # Row-major r0..r8 of the IR→RGB projection rotation.
            R_proj = K_rgb @ intrinsic_info['R_ir2rgb'] @ intrinsic_info['invPR1']
            calib_info['r_coeff'] = {
                f'r{3 * row + col}': float(R_proj[row, col])
                for row in range(3) for col in range(3)
            }

            # t0..t2 of the projected translation column vector.
            T_proj = K_rgb @ intrinsic_info['T_ir2rgb']
            calib_info['t_coeff'] = {f't{row}': float(T_proj[row, 0]) for row in range(3)}
            calib_info['category'] = 'rgbd'  # RGBD module

        return calib_info


class RMSLAlign(object):
    """Project a depth map from the depth-camera frame into the RGB frame.

    The alignment uses linearized projection coefficients derived from the
    calibration data:
      * r_coeff ('r0'..'r8'): flattened 3x3 matrix K_rgb @ R_ir2rgb @ invPR1
      * t_coeff ('t0'..'t2'): vector K_rgb @ T_ir2rgb
    so that for a depth pixel (u, v) with value d:
        [x, y, z]^T = d * R_proj @ [u, v, 1]^T + T_proj
    and the aligned RGB pixel is (x/z, y/z) carrying depth value z.

    A padded working buffer (``extend_*_pixels`` margin on every side) absorbs
    projected points that land slightly outside the destination image, and a
    2x2 splat fills the small holes the forward mapping leaves behind.
    """

    def __init__(self,
                 src_w: int, src_h: int,
                 dst_w: int, dst_h: int,
                 extend_w_pixels=256,
                 extend_h_pixels=256,
                 rotate_angle=0):
        """Allocate the LUT and the padded output buffer.

        Args:
            src_w, src_h: size of the source depth/disparity image.
            dst_w, dst_h: size of the destination (RGB) image.
            extend_w_pixels, extend_h_pixels: padding margin on each side.
            rotate_angle: clockwise rotation of projected coords (0/90/180/270).
        """
        self.src_w = src_w
        self.src_h = src_h
        self.dst_w = dst_w
        self.dst_h = dst_h
        self.rotate_angle = rotate_angle
        self.extend_w_pixel = extend_w_pixels
        self.extend_h_pixel = extend_h_pixels

        # Padded buffer: destination size plus margin on all four sides.
        self.extend_buf_w = dst_w + 2 * self.extend_w_pixel
        self.extend_buf_h = dst_h + 2 * self.extend_h_pixel

        # Per-pixel projection LUT: lut[v, u] = R_proj @ [u, v, 1]^T.
        self.lut = np.zeros((self.src_h, self.src_w, 3), dtype=np.float32)
        self.dp_extend = np.zeros((self.extend_buf_h, self.extend_buf_w), dtype=np.uint16)

        self.logger = init_logger(RMSL_LOG_PATH)

    def create_lut(self, r_coeff: dict):
        """Precompute the rotation part of the projection for every source pixel.

        Args:
            r_coeff: flattened 3x3 projection matrix under keys 'r0'..'r8'.
                     Logs an error and leaves the LUT untouched when missing.
        """
        # BUGFIX: test for None BEFORE len(); the original order evaluated
        # len(None) first and raised TypeError instead of logging the error.
        if r_coeff is None or len(r_coeff) == 0:
            self.logger.log_to_file(f" 🚨 [Error] Check calib_data file(failed get r and t coeff)")
            return

        # cols[v, u] == u, rows[v, u] == v — one matrix-vector product per pixel.
        cols, rows = np.meshgrid(np.arange(self.src_w), np.arange(self.src_h))
        x = r_coeff['r0'] * cols + r_coeff['r1'] * rows + r_coeff['r2']
        y = r_coeff['r3'] * cols + r_coeff['r4'] * rows + r_coeff['r5']
        z = r_coeff['r6'] * cols + r_coeff['r7'] * rows + r_coeff['r8']

        self.lut[:, :, 0] = x
        self.lut[:, :, 1] = y
        self.lut[:, :, 2] = z

    def _cropped_result(self):
        """Return the dst-sized central window of the padded buffer (a view)."""
        return self.dp_extend[
               self.extend_h_pixel:self.extend_buf_h - self.extend_h_pixel,
               self.extend_w_pixel:self.extend_buf_w - self.extend_w_pixel
               ]

    def align_dp_to_rgb(self, dp_img, t_coeff: dict):
        """Forward-project dp_img into the RGB frame.

        Args:
            dp_img: (src_h, src_w) depth map; zero pixels are invalid.
            t_coeff: translation coefficients under keys 't0'..'t2'.

        Returns:
            (dst_h, dst_w) uint16 aligned depth image (a view into the
            internal buffer — copy it if it must outlive the next call).
        """
        self.dp_extend.fill(0)

        valid_mask = dp_img > 0
        if not np.any(valid_mask):
            return self._cropped_result()

        valid_coords = np.where(valid_mask)
        valid_dp_vals = dp_img[valid_coords]
        valid_lut = self.lut[valid_coords]

        # [x, y, z] = d * (R_proj @ [u, v, 1]) + T_proj for every valid pixel.
        uv_color = np.column_stack([
            valid_dp_vals * valid_lut[:, 0] + t_coeff['t0'],
            valid_dp_vals * valid_lut[:, 1] + t_coeff['t1'],
            valid_dp_vals * valid_lut[:, 2] + t_coeff['t2']
        ])

        # Discard points at/behind the camera plane to avoid divide-by-zero.
        z_valid = uv_color[:, 2] >= 1e-6
        if not np.any(z_valid):
            return self._cropped_result()

        uv_color = uv_color[z_valid]

        # Perspective divide with +0.5 rounding to the nearest pixel.
        inv_z = 1.0 / uv_color[:, 2]
        new_coords = np.column_stack([
            (uv_color[:, 0] * inv_z + 0.5).astype(np.int32),
            (uv_color[:, 1] * inv_z + 0.5).astype(np.int32)
        ])

        # Rotate projected coordinates into the display orientation.
        if self.rotate_angle == 90:
            new_coords = np.column_stack([new_coords[:, 1], self.dst_h - 1 - new_coords[:, 0]])
        elif self.rotate_angle == 180:
            new_coords = np.column_stack([
                self.dst_w - 1 - new_coords[:, 0],
                self.dst_h - 1 - new_coords[:, 1]
            ])
        elif self.rotate_angle == 270:
            new_coords = np.column_stack([self.dst_w - 1 - new_coords[:, 1], new_coords[:, 0]])

        bounds_valid = (
                (new_coords[:, 0] >= 0) & (new_coords[:, 0] < self.dst_w) &
                (new_coords[:, 1] >= 0) & (new_coords[:, 1] < self.dst_h)
        )

        if not np.any(bounds_valid):
            return self._cropped_result()

        new_coords = new_coords[bounds_valid]
        # The stored value is z — depth in the RGB camera frame.
        pixel_values = uv_color[bounds_valid, 2].astype(np.int32)
        final_coords = new_coords + np.array([self.extend_w_pixel, self.extend_h_pixel])
        self._fill_2x2_vectorized(final_coords, pixel_values)

        return self._cropped_result()

    def _fill_2x2_vectorized(self, coords, values):
        """Splat every value into a 2x2 neighborhood to fill mapping holes.

        The neighborhood direction follows the rotation so the splat always
        grows toward the same image corner as in the unrotated case.
        """
        x_coords, y_coords = coords[:, 0], coords[:, 1]

        if self.rotate_angle == 0:
            offsets = [(0, 0), (1, 0), (0, 1), (1, 1)]
        elif self.rotate_angle == 90:
            offsets = [(0, 0), (1, 0), (0, -1), (1, -1)]
        elif self.rotate_angle == 180:
            offsets = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
        else:  # 270
            offsets = [(0, 0), (-1, 0), (0, 1), (-1, 1)]

        for dx, dy in offsets:
            final_x = x_coords + dx
            final_y = y_coords + dy

            # Bounds check against the padded buffer.
            valid_indices = (
                    (final_x >= 0) & (final_x < self.dp_extend.shape[1]) &
                    (final_y >= 0) & (final_y < self.dp_extend.shape[0])
            )

            if np.any(valid_indices):
                self.dp_extend[final_y[valid_indices], final_x[valid_indices]] = values[valid_indices]


class SaveWorker(QObject):
    """Background worker that writes the current frame set to disk.

    Saves RGB / disparity / depth / debug images, then generates a point
    cloud (RGB-colored or plain, per config). Meant to be moved to a QThread
    and driven via :meth:`download_image_and_pointcloud`.

    Signals:
        finished:    emitted when the save pass completes (success or failure).
        log_message: emitted with human-readable progress / error strings.
    """
    finished = pyqtSignal()
    log_message = pyqtSignal(str)

    def __init__(self, images_dict: dict, calib_info: dict):
        """Snapshot the images, calibration and config for one save pass.

        Args:
            images_dict: optional entries 'rgb', 'disp', 'depth', 'pse_color',
                         'ir_l', 'ir_r', 'rgb_l', 'rgb_r', 'diff'.
            calib_info:  parsed calibration dict (left intrinsics, r/t coeffs,
                         module 'category' of 'rgbd' or '2rgb').
        """
        super().__init__()
        self.logger = init_logger(RMSL_LOG_PATH)

        self.calib_info = calib_info
        self.utils = UtilsBasic()
        self.save_dir = str(PROJECT_ROOT / 'save')
        self.utils.save_path_check(self.save_dir)

        """-------------------------------------------------------------------------------------"""
        # Read config: which point-cloud flavor to save and the valid depth range.
        self.config_file_path = str(PROJECT_ROOT / 'config.json')
        config_info = self.utils.read_json(self.config_file_path)

        # work_mode: whether a debug capture mode is active
        self.work_mode = config_info.get('work_mode', 'rgbd')
        # save_rgb_pointcloud: 1 → save an RGB-colored point cloud
        self.save_rgb_pointcloud = config_info.get('save_rgb_pointcloud', 0)
        # min_distance_mm/max_distance_mm: valid depth range
        self.min_depth_dis = config_info.get('min_distance_mm', 100)  # mm
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)  # mm

        """-------------------------------------------------------------------------------------"""
        # Unpack the images_dict snapshot to be saved (missing entries → None).
        self.img_rgb = images_dict.get('rgb', None)
        self.img_disp = images_dict.get('disp', None)
        self.img_depth = images_dict.get('depth', None)
        self.img_pse_color = images_dict.get('pse_color', None)

        self.img_ir_l = images_dict.get('ir_l', None)
        self.img_ir_r = images_dict.get('ir_r', None)
        self.img_rgb_l = images_dict.get('rgb_l', None)
        self.img_rgb_r = images_dict.get('rgb_r', None)
        self.img_diff = images_dict.get('diff', None)

        """-------------------------------------------------------------------------------------"""
        # Left-camera intrinsics used for the plain (non-RGB) point cloud.
        self.fx = self.calib_info['l_intrinsic']['fx']
        self.fy = self.calib_info['l_intrinsic']['fy']
        self.cx = self.calib_info['l_intrinsic']['cx']
        self.cy = self.calib_info['l_intrinsic']['cy']

        # RMSL322-RGBD/RMSL322-2RGB
        self.module_category = self.calib_info['category']

        # The depth→RGB aligner is only needed (and only valid) when saving an
        # RGB point cloud from an RGBD module.
        if self.save_rgb_pointcloud == 1 and self.module_category == 'rgbd':
            # Get r_coeff and t_coeff
            self.r_coeff = self.calib_info['r_coeff']
            self.t_coeff = self.calib_info['t_coeff']

            self.aligner = RMSLAlign(src_w=self.img_depth.shape[1],
                                     src_h=self.img_depth.shape[0],
                                     dst_w=self.img_rgb.shape[1],
                                     dst_h=self.img_rgb.shape[0],
                                     extend_w_pixels=256,
                                     extend_h_pixels=256,
                                     rotate_angle=0)
            self.aligner.create_lut(self.r_coeff)

    @pyqtSlot()
    def download_image_and_pointcloud(self):
        """Save all captured images, then the depth map and point cloud.

        Emits log_message for progress and errors; always emits finished at
        the end, so the owning thread can be cleaned up unconditionally.
        """
        try:
            timestamp = UtilsBasic.get_formatted_timestamp()
            if self.img_rgb is not None:
                self.utils.save_file(self.img_rgb, self.save_dir, 'rgb', timestamp, en_rgb2bgr=True)

            """---------------------------------------------------------------------------------"""
            # Debug captures: ir_l, ir_r, and the disparity-diff visualization.
            if self.work_mode == 'rgbd-debug':
                if self.img_ir_l is not None:
                    self.utils.save_file(self.img_ir_l, self.save_dir,
                                         'ir_left', timestamp, en_rgb2bgr=True)
                if self.img_ir_r is not None:
                    self.utils.save_file(self.img_ir_r, self.save_dir,
                                         'ir_right', timestamp, en_rgb2bgr=True)
                if self.img_diff is not None:
                    self.utils.save_file(self.img_diff, self.save_dir,
                                         'disp_diff', timestamp, en_rgb2bgr=True)

            if self.work_mode == '2rgb-debug':
                if self.img_rgb_l is not None:
                    self.utils.save_file(self.img_rgb_l, self.save_dir,
                                         'rgb_left', timestamp, en_rgb2bgr=True)
                if self.img_rgb_r is not None:
                    self.utils.save_file(self.img_rgb_r, self.save_dir,
                                         'rgb_right', timestamp, en_rgb2bgr=True)

            self.log_message.emit(f' 🎉 [Info] Success download rgb image to dir: {self.save_dir}')
            """---------------------------------------------------------------------------------"""
            if self.img_disp is not None:
                self.utils.save_file(self.img_disp, self.save_dir, 'disp', timestamp, en_rgb2bgr=False)

            self.log_message.emit(f' 🎉 [Info] Success download disp image to dir: {self.save_dir}')

            if self.img_depth is not None:
                # save depth
                self.utils.save_file(self.img_depth, self.save_dir, 'depth', timestamp, en_rgb2bgr=False)

                # save pse-color depth
                # BUGFIX: guard against a missing pseudo-color frame — saving
                # None raised and aborted point-cloud generation below.
                if self.img_pse_color is not None:
                    self.utils.save_file(self.img_pse_color, self.save_dir, 'depth_pse_color', timestamp, en_rgb2bgr=True)

                # Generate pointcloud
                pointcloud_name = f'pointcloud_{timestamp}.ply'
                pointcloud_path = os.path.join(self.save_dir, pointcloud_name)

                if self.save_rgb_pointcloud == 1:
                    # Save an RGB-colored point cloud.
                    if self.module_category == 'rgbd':
                        # rgbd: align depth to the RGB frame first.
                        align_depth_img = self.aligner.align_dp_to_rgb(self.img_depth, self.t_coeff)
                        pointcloud_save = self.utils.generate_rgb_pointcloud(self.img_rgb,
                                                                             align_depth_img,
                                                                             self.calib_info['rgb_intrinsic'],
                                                                             depth_ratio=1.0,
                                                                             dis_near=self.min_depth_dis,
                                                                             dis_far=self.max_depth_dis)
                    else:
                        # 2rgb: depth is already in the left-RGB frame.
                        pointcloud_save = self.utils.generate_rgb_pointcloud(self.img_rgb,
                                                                             self.img_depth,
                                                                             self.calib_info['l_intrinsic'],
                                                                             depth_ratio=1.0,
                                                                             dis_near=self.min_depth_dis,
                                                                             dis_far=self.max_depth_dis)
                    self.utils.save_ply_binary_rgb(pointcloud_save, pointcloud_path)
                else:
                    # Plain (uncolored) point cloud from depth + left intrinsics.
                    _, pointcloud_save, _ = self.utils.generate_pointcloud_by_depth(self.img_depth,
                                                                                    self.fx,
                                                                                    self.fy,
                                                                                    self.cx,
                                                                                    self.cy,
                                                                                    self.img_depth.shape[0],
                                                                                    self.img_depth.shape[1],
                                                                                    self.min_depth_dis,
                                                                                    self.max_depth_dis,
                                                                                    en_valid_roi=False,
                                                                                    en_cen_roi=False)
                    self.utils.save_pointcloud_fast(pointcloud_save, pointcloud_path)

                self.log_message.emit(f' 🎉 [Info] Success download depth/pointcloud to dir: {self.save_dir}')
        except Exception as e:
            self.log_message.emit(f' 🚨 [Error] Failed save image: {e}')
        finally:
            # Always notify completion so the caller can tear down the thread.
            self.finished.emit()


class FrameProcessor(QThread):
    processing_rgb_finished = pyqtSignal(object)
    processing_dp_finished = pyqtSignal(object)

    def __init__(self, calib_info: dict):
        """Set up the frame-processing thread: config, colorizer, undistort maps.

        Args:
            calib_info: parsed calibration dict; 'category' selects between
                        the RGBD path (plain undistortion of the RGB camera)
                        and the 2RGB path (stereo rectification of the left
                        camera).

        Raises:
            ValueError: when config's work_mode is not a supported mode.
        """
        super().__init__()
        self.processing_queue = queue.Queue()
        self.is_running = True
        self.calib_info = calib_info
        self.logger = init_logger(RMSL_LOG_PATH)
        self.utils = UtilsBasic()

        """-------------------------------------------------------------------------------------"""
        # Load viewer configuration.
        self.config_file_path = str(PROJECT_ROOT / 'config.json')
        config_info = self.utils.read_json(self.config_file_path)
        self.view_mode = config_info['view_mode']
        self.work_mode = config_info.get('work_mode', 'rgbd')

        self.rgb_w, self.rgb_h = 1280, 1080
        # UVC transfer-buffer dimensions per work mode; debug modes carry
        # several stacked sub-frames in one buffer.
        mode_buffer_sizes = {
            'rgbd': (1280, 1080),
            '2rgb': (1080, 960),  # 1280 * 1.5 // 2
            'rgbd-debug': (1080, 3200),
            '2rgb-debug': (1080, 2240),
        }
        if self.work_mode not in mode_buffer_sizes:
            self.logger.log_to_file(f" 🚨 [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                                    f"Convert to RGBD mode, ['rgbd', 'rgbd-debug', '2rgb', '2rgb-debug'].")
            raise ValueError(f" 🚨 [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                             f"Convert to RGBD mode, ['rgbd', 'rgbd-debug', '2rgb', '2rgb-debug'].")
        self.uvc_buf_w, self.uvc_buf_h = mode_buffer_sizes[self.work_mode]
        self.disp_w, self.disp_h = 640, 352

        # Depth Range
        self.min_depth_dis = config_info.get('min_distance_mm', 100)  # mm
        self.max_depth_dis = config_info.get('max_distance_mm', 5000)  # mm
        self.colorizer_mode = config_info.get('colorize_mode', 'jet_red2blue')

        """-------------------------------------------------------------------------------------"""

        # PseColorizer
        self.colorizer = Colorizer(mode=self.colorizer_mode)

        # Per-frame image caches; refreshed on every processed frame.
        self.img_rgb, self.img_disp, self.img_depth = None, None, None
        self.img_rgb_l, self.img_rgb_r = None, None
        self.img_ir_l, self.img_ir_r = None, None
        self.img_pse_color = None
        self.img_disp_diff = None
        self.img_last_disp = None

        """-------------------------------------------------------------------------------------"""

        # Q-matrix terms used by disparity→depth conversion.
        self.q23 = self.calib_info['q23']
        self.q32 = self.calib_info['q32']
        self.q33 = self.calib_info['q33']

        if self.calib_info['category'] == 'rgbd':
            # RGBD: plain undistortion of the RGB camera.
            rgb_size = (self.calib_info['rgb_width'], self.calib_info['rgb_height'])
            k_rgb = self.calib_info['rgb_intrinsic']['K']
            d_rgb = self.calib_info['rgb_distortion']['D']
            new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(k_rgb, d_rgb, rgb_size,
                                                                 alpha=0,
                                                                 newImgSize=rgb_size)
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(k_rgb, d_rgb, None,
                                                               new_camera_matrix,
                                                               rgb_size, cv2.CV_32FC1)
        else:
            # 2RGB: epipolar rectification + undistortion of the left camera.
            left_size = (self.calib_info['l_width'], self.calib_info['l_height'])
            self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.calib_info['l_intrinsic']['K_l'],
                                                               self.calib_info['l_distortion']['D_l'],
                                                               self.calib_info['R1'],
                                                               self.calib_info['P1'],
                                                               left_size, cv2.CV_32FC1)

    def add_frame(self, frame_info):
        """Enqueue a raw UVC frame, dropping any backlog first.

        When more than two frames are already pending, the processing thread
        is lagging — discard everything stale so only fresh frames are shown.
        """
        if self.processing_queue.qsize() > 2:
            try:
                while True:
                    self.processing_queue.get_nowait()
            except queue.Empty:
                pass
        self.processing_queue.put(frame_info)

    def run(self):
        while self.is_running:
            try:
                frame_data = self.processing_queue.get(timeout=1.0)
                expected_size = self.uvc_buf_w * self.uvc_buf_h * 2
                frame_array = np.frombuffer(frame_data, dtype=np.uint8)
                if len(frame_array) != expected_size:
                    self.logger.log_to_file(
                        f' ⚠️ [Warn] [FrameProcessor] Frame size mismatch: {len(frame_array)} != {expected_size}')
                    return

                # Initial
                self.img_rgb, self.img_disp, self.img_depth = None, None, None
                self.img_rgb_l, self.img_rgb_r = None, None
                self.img_ir_l, self.img_ir_r = None, None
                self.img_pse_color = None
                self.img_disp_diff = None

                header = self.parse_uvc_head(frame_array)
                head_size = 28

                if header['magic'] != 0xA55A:
                    # TODO: not process
                    timestamp = 0
                else:
                    timestamp = int(header['timestamp'] / 1000)  # us to ms

                if self.work_mode == '2rgb':
                    if header['type'] == 1:  # rgb frame
                        rgb_frame_data = frame_array[0: expected_size]
                        rgb_frame_data = rgb_frame_data.reshape(self.rgb_w * 3 // 2, self.rgb_h)
                        rgb_frames_rgb = cv2.cvtColor(rgb_frame_data, cv2.COLOR_YUV2RGB_NV12)

                        # clear head
                        rgb_frames_rgb[0, : head_size] = rgb_frames_rgb[1, : head_size]
                        origin_rgb_img = cv2.rotate(rgb_frames_rgb, cv2.ROTATE_90_CLOCKWISE)

                        # 校正RGB, check size
                        self.preprocess_rgb_for_correction(origin_rgb_img=origin_rgb_img)
                        result_rgb = {
                            'rgb': self.img_rgb,
                            'ir_l': self.img_ir_l,
                            'ir_r': self.img_ir_r,
                            'rgb_l': self.img_rgb_l,
                            'rgb_r': self.img_rgb_r,
                            'diff': self.img_disp_diff,
                            'timestamp': timestamp,
                            'origin_rgb': origin_rgb_img
                        }
                        self.processing_rgb_finished.emit(result_rgb)
                    elif header['type'] == 2:  # dp
                        depth_w, depth_h = header['width'], header['height']
                        dp_frame_data = frame_array[0:depth_h * depth_w * 2]
                        dp_frame_16bit = np.frombuffer(dp_frame_data, dtype=np.uint16).copy()
                        dp_frame_16bit = dp_frame_16bit.reshape((depth_h, depth_w))

                        # clear head
                        dp_frame_16bit[0, : (head_size // 2)] = dp_frame_16bit[1, : (head_size // 2)]
                        self.img_disp = dp_frame_16bit

                        if self.view_mode == 'depth' and self.img_disp is not None:
                            # convert disp to depth
                            self.img_depth = self.utils.disp_to_depth_vectorized(self.img_disp, self.q23, self.q32,
                                                                                 self.q33,
                                                                                 subpixel_value=64,
                                                                                 zoom_ratio=1.0)
                            self.img_pse_color = self.colorizer.colorize(self.img_depth,
                                                                         depth_range=(
                                                                             self.min_depth_dis, self.max_depth_dis))
                        elif self.img_disp is not None:
                            self.img_pse_color = self.colorizer.colorize(self.img_disp,
                                                                         depth_range=(10, 5120))
                        else:
                            self.img_pse_color = None

                        result = {
                            'disp': self.img_disp,
                            'depth': self.img_depth,
                            'pse_color': self.img_pse_color,
                            'timestamp': timestamp
                        }

                        self.processing_dp_finished.emit(result)
                    else:
                        self.logger.log_to_file(f" 🚨 [Error] Header mode just support [1: rgb, 2: disp] ")
                elif self.work_mode == '2rgb-debug':
                    rgb_img_size = self.rgb_w * self.rgb_h * 3 // 2  # nv12
                    depth_img_size = self.disp_w * self.disp_h * 2

                    total_frames = len(frame_array) // rgb_img_size  # 计算实际帧数
                    frames = []
                    for i in range(total_frames):
                        start = i * rgb_img_size
                        end = (i + 1) * rgb_img_size
                        frame_data = frame_array[start:end]
                        frames.append(frame_data.reshape((self.rgb_w * 3 // 2, self.rgb_h)))
                    frames_rgb = [cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_NV12) for frame in frames]

                    # Gen diff
                    dp_offset = total_frames * rgb_img_size
                    disp_frame_data = frame_array[dp_offset:(dp_offset + depth_img_size)]
                    dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
                    disp_frame = dp_frame_16bit.reshape((self.disp_h, self.disp_w))
                    self.img_disp = disp_frame

                    if self.img_last_disp is not None:
                        # 构造 mask：两者都非零的位置为 True
                        valid_mask = (disp_frame != 0) & (self.img_last_disp != 0)
                        diff = disp_frame - self.img_last_disp
                        diff[~valid_mask] = 0
                        self.img_disp_diff = self.colorizer.colorize(diff, depth_range=(10, 256), normalize=False)

                    # update
                    self.img_last_disp = disp_frame

                    self.img_rgb_l = cv2.rotate(frames_rgb[0], cv2.ROTATE_90_CLOCKWISE)
                    self.img_rgb_r = cv2.rotate(frames_rgb[1], cv2.ROTATE_90_CLOCKWISE)
                    origin_rgb_img = self.img_rgb_l
                    self.preprocess_rgb_for_correction(origin_rgb_img=origin_rgb_img)

                    result_rgb = {
                        'rgb': self.img_rgb,
                        'ir_l': self.img_ir_l,
                        'ir_r': self.img_ir_r,
                        'rgb_l': self.img_rgb_l,
                        'rgb_r': self.img_rgb_r,
                        'diff': self.img_disp_diff,
                        'timestamp': timestamp,
                        'origin_rgb': origin_rgb_img
                    }
                    self.processing_rgb_finished.emit(result_rgb)

                    if self.view_mode == 'depth' and self.img_disp is not None:
                        # convert disp to depth
                        self.img_depth = self.utils.disp_to_depth_vectorized(self.img_disp, self.q23, self.q32,
                                                                             self.q33,
                                                                             subpixel_value=64,
                                                                             zoom_ratio=1.0)
                        self.img_pse_color = self.colorizer.colorize(self.img_depth,
                                                                     depth_range=(
                                                                         self.min_depth_dis, self.max_depth_dis))
                    elif self.img_disp is not None:
                        self.img_pse_color = self.colorizer.colorize(self.img_disp,
                                                                     depth_range=(10, 5120))
                    else:
                        self.img_pse_color = None

                    result = {
                        'disp': self.img_disp,
                        'depth': self.img_depth,
                        'pse_color': self.img_pse_color,
                        'timestamp': timestamp
                    }

                    self.processing_dp_finished.emit(result)
                elif self.work_mode == 'rgbd':
                    rgb_img_size = self.rgb_w * self.rgb_h * 3 // 2  # nv12
                    depth_img_size = self.disp_w * self.disp_h * 2  # uint16

                    rgb_frame_data = frame_array[0: rgb_img_size]
                    rgb_frame_data = rgb_frame_data.reshape(self.rgb_w * 3 // 2, self.rgb_h)
                    rgb_frames_rgb = cv2.cvtColor(rgb_frame_data, cv2.COLOR_YUV2RGB_NV12)
                    origin_rgb_frame = cv2.rotate(rgb_frames_rgb, cv2.ROTATE_90_CLOCKWISE)
                    self.img_rgb = cv2.remap(origin_rgb_frame, self.mapx, self.mapy, cv2.INTER_LINEAR)

                    result_rgb = {
                        'rgb': self.img_rgb,
                        'ir_l': self.img_ir_l,
                        'ir_r': self.img_ir_r,
                        'rgb_l': self.img_rgb_l,
                        'rgb_r': self.img_rgb_r,
                        'diff': self.img_disp_diff,
                        'timestamp': timestamp,
                        'origin_rgb': origin_rgb_frame
                    }
                    self.processing_rgb_finished.emit(result_rgb)

                    # process disp
                    disp_frame_data = frame_array[rgb_img_size: (rgb_img_size + depth_img_size)]
                    dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
                    self.img_disp = dp_frame_16bit.reshape((self.disp_h, self.disp_w))

                    if self.view_mode == 'depth' and self.img_disp is not None:
                        # convert disp to depth
                        self.img_depth = self.utils.disp_to_depth_vectorized(self.img_disp, self.q23, self.q32,
                                                                             self.q33,
                                                                             subpixel_value=64,
                                                                             zoom_ratio=1.0)
                        self.img_pse_color = self.colorizer.colorize(self.img_depth,
                                                                     depth_range=(
                                                                         self.min_depth_dis, self.max_depth_dis))
                    elif self.img_disp is not None:
                        self.img_pse_color = self.colorizer.colorize(self.img_disp,
                                                                     depth_range=(10, 5120))
                    else:
                        self.img_pse_color = None

                    result = {
                        'disp': self.img_disp,
                        'depth': self.img_depth,
                        'pse_color': self.img_pse_color,
                        'timestamp': timestamp
                    }
                    self.processing_dp_finished.emit(result)
                elif self.work_mode == 'rgbd-debug':
                    rgb_img_size = self.rgb_w * self.rgb_h * 3 // 2  # nv12
                    depth_img_size = self.disp_w * self.disp_h * 2  # uint16

                    total_frames = len(frame_array) // rgb_img_size  # 计算实际帧数
                    frames = []
                    for i in range(total_frames):
                        start = i * rgb_img_size
                        end = (i + 1) * rgb_img_size
                        frame_data = frame_array[start:end]
                        frames.append(frame_data.reshape((self.rgb_w * 3 // 2, self.rgb_h)))
                    frames_rgb = [cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_NV12) for frame in frames]

                    # Gen diff
                    total_frames = len(frame_array) // rgb_img_size
                    dp_offset = total_frames * rgb_img_size
                    disp_frame_data = frame_array[dp_offset:(dp_offset + depth_img_size)]
                    dp_frame_16bit = np.frombuffer(disp_frame_data, dtype=np.uint16)
                    disp_frame = dp_frame_16bit.reshape((self.disp_h, self.disp_w))
                    self.img_disp = disp_frame

                    if self.img_last_disp is not None:
                        # 构造 mask：两者都非零的位置为 True
                        valid_mask = (disp_frame != 0) & (self.img_last_disp != 0)
                        diff = disp_frame - self.img_last_disp
                        diff[~valid_mask] = 0
                        self.img_disp_diff = self.colorizer.colorize(diff, depth_range=(10, 256), normalize=False)

                    self.img_last_disp = disp_frame

                    # u8
                    self.img_ir_l = cv2.rotate(frames_rgb[0], cv2.ROTATE_90_CLOCKWISE)
                    self.img_ir_r = cv2.rotate(frames_rgb[1], cv2.ROTATE_90_CLOCKWISE)
                    origin_rgb_frame = cv2.rotate(frames_rgb[2], cv2.ROTATE_90_CLOCKWISE)
                    self.img_rgb = cv2.remap(origin_rgb_frame, self.mapx, self.mapy, cv2.INTER_LINEAR)

                    result_rgb = {
                        'rgb': self.img_rgb,
                        'ir_l': self.img_ir_l,
                        'ir_r': self.img_ir_r,
                        'rgb_l': self.img_rgb_l,
                        'rgb_r': self.img_rgb_r,
                        'diff': self.img_disp_diff,
                        'timestamp': timestamp,
                        'origin_rgb': origin_rgb_frame
                    }
                    self.processing_rgb_finished.emit(result_rgb)

                    if self.view_mode == 'depth' and self.img_disp is not None:
                        # convert disp to depth
                        self.img_depth = self.utils.disp_to_depth_vectorized(self.img_disp, self.q23, self.q32,
                                                                             self.q33,
                                                                             subpixel_value=64,
                                                                             zoom_ratio=1.0)
                        self.img_pse_color = self.colorizer.colorize(self.img_depth,
                                                                     depth_range=(
                                                                         self.min_depth_dis, self.max_depth_dis))
                    elif self.img_disp is not None:
                        self.img_pse_color = self.colorizer.colorize(self.img_disp,
                                                                     depth_range=(10, 5120))
                    else:
                        self.img_pse_color = None

                    result = {
                        'disp': self.img_disp,
                        'depth': self.img_depth,
                        'pse_color': self.img_pse_color,
                        'timestamp': 0
                    }
                    self.processing_dp_finished.emit(result)

                else:
                    self.logger.log_to_file(f" 🚨 [Error] [FrameProcessor] Mode {self.work_mode} is not supported.")

                    result_rgb = {
                        'rgb': self.img_rgb,
                        'ir_l': self.img_ir_l,
                        'ir_r': self.img_ir_r,
                        'rgb_l': self.img_rgb_l,
                        'rgb_r': self.img_rgb_r,
                        'diff': self.img_disp_diff,
                        'timestamp': timestamp,
                        'origin_rgb': None
                    }

                    result_depth = {
                        'disp': self.img_disp,
                        'depth': self.img_depth,
                        'pse_color': self.img_pse_color,
                        'timestamp': timestamp
                    }
                    # Get Result
                    self.processing_rgb_finished.emit(result_rgb)
                    self.processing_dp_finished.emit(result_depth)
            except queue.Empty:
                continue
            except Exception as e:
                self.logger.log_to_file(f" 🎉 [Info] Failed to process frame buffer: {e}")

    def preprocess_rgb_for_correction(self, origin_rgb_img: np.ndarray):
        """Bring the raw RGB frame to the calibrated resolution and rectify it.

        The result is stored in ``self.img_rgb``; nothing is returned.
        """
        # Pick the calibrated RGB resolution for the current calibration category.
        if self.calib_info['category'] == '2rgb':
            calib_w = self.calib_info['l_width']
            calib_h = self.calib_info['l_height']
        else:
            calib_w = self.calib_info['rgb_width']
            calib_h = self.calib_info['rgb_height']

        if self.rgb_w == calib_w * 2:
            # Sensor frame is twice the calibrated width: decimate 2x in both
            # axes, then symmetrically crop the height down to the calibrated
            # value (extra rows split between top and bottom).
            decimated = origin_rgb_img[::2, ::2]
            half_h = int(self.rgb_h / 2)
            surplus = int(self.rgb_h / 2 - calib_h)
            top = surplus // 2
            bottom = surplus - top
            self.img_rgb = decimated[top: half_h - bottom, :]
        else:
            self.img_rgb = origin_rgb_img

        # Undistort/rectify using the precomputed remap tables.
        self.img_rgb = cv2.remap(self.img_rgb, self.mapx, self.mapy, cv2.INTER_LINEAR)

    def parse_uvc_head(self, frame_data: Union[bytes, np.ndarray]) -> Dict[str, Any]:
        """Parse the 28-byte UVC frame header (struct ``uvc_head_v1``).

        Args:
            frame_data: Raw frame buffer (bytes-like object or uint8 ndarray);
                only the first 28 bytes are read.

        Returns:
            Dict with keys: magic, version, size, type, width, height,
            sequence, timestamp, reserve.

        Raises:
            ValueError: If fewer than 28 bytes are available.
        """
        if isinstance(frame_data, np.ndarray):
            buf = frame_data[:28].tobytes()
        else:
            # Slice BEFORE converting so a large frame buffer (e.g. a
            # memoryview/bytearray of several MB) is not copied in full.
            buf = bytes(frame_data[:28])

        if len(buf) < 28:
            self.logger.log_to_file(" 🚨 [Error] frame_data size not match 28 Bytes，Can not parser struct: uvc_head_v1")
            raise ValueError(" 🚨 [Error] frame_data size not match 28 Bytes，Can not parser struct: uvc_head_v1")

        # Little-endian layout: 6 x uint16, uint32, uint64, uint32 = 28 bytes.
        fields = struct.unpack('<HHHHHH I Q I', buf)
        keys = ('magic', 'version', 'size', 'type',
                'width', 'height', 'sequence', 'timestamp', 'reserve')
        return dict(zip(keys, fields))

    def stop(self):
        """Stop the processing thread and block until it has fully exited."""
        self.is_running = False  # tells the worker loop to stop iterating
        self.quit()  # ask the QThread's event loop (if any) to exit
        self.wait()  # block the caller until the thread has terminated


class RMSLViewer(QWidget, Ui_RMSLViewer):
    """Main viewer window.

    Pulls UVC frames from a reader thread, hands them to a FrameProcessor,
    and previews the resulting RGB / pseudo-colored depth images in two
    QGraphicsViews. Supports locking the preview for per-pixel inspection
    (color / distance tooltip) and saving images + point cloud to disk.
    """

    def __init__(self):
        super(RMSLViewer, self).__init__()
        self.logger = init_logger(RMSL_LOG_PATH)
        self.config_file_path = str(PROJECT_ROOT / 'config.json')

        self.utils = UtilsBasic()
        self.uvc_fps = 30  # default

        """-------------------------------------------------------------------------------------"""
        # icons change
        self.icon_open_inactive = QIcon(":/image/image/stop.png")
        self.icon_open_active = QIcon(":/image/image/start.png")
        self.icon_lock_inactive = QIcon(":/image/image/unlock.png")
        self.icon_lock_active = QIcon(":/image/image/lock.png")
        self.icon_3d_inactive = QIcon(":/image/image/2d.png")
        self.icon_3d_active = QIcon(":/image/image/3d.png")

        # status
        self.is_started = False
        self.is_locked = False
        self.is_3d = False

        # global status
        self.is_running = False  # Ensure Capture Thread running

        """-------------------------------------------------------------------------------------"""
        # cache and save
        self.img_rgb = None  # rgb

        # rgb debug
        self.img_rgb_l = None  # rgb base(left)
        self.img_rgb_r = None

        # ir debug
        self.img_ir_l = None  # speckle-left
        self.img_ir_r = None  # speckle-right

        # debug
        self.img_disp_diff = None  # disp diff

        self.img_disp = None  # disp
        self.img_depth = None  # depth
        self.img_pse_color = None  # pse color of depth
        self.point_cloud = None  # pointcloud
        self.timestamp_rgb = 0
        self.timestamp_disp = 0
        self.matcher = SensorDataMatcher()

        """-------------------------------------------------------------------------------------"""
        # beautiful message box
        self.message_box_style = MessageBox_Style
        self.setup_message_box_style()

        """-------------------------------------------------------------------------------------"""
        # ui initial
        # scene and image item, for preview.
        self.scene_rgb = QGraphicsScene()
        self.scene_depth = QGraphicsScene()
        self.image_item_rgb = QGraphicsPixmapItem()
        self.image_item_depth = QGraphicsPixmapItem()

        self.setupUi(self)
        self.button_init()
        self.linedit_init()
        self.textbrowser_init()
        self.view_init()

        """-------------------------------------------------------------------------------------"""
        # Install event filters so clicks on the preview viewports reach eventFilter().
        self.view_rgb.viewport().installEventFilter(self)
        self.view_depth.viewport().installEventFilter(self)

        """-------------------------------------------------------------------------------------"""
        # bind logger
        self.logger.log_signal.connect(self.log_update)

        """-------------------------------------------------------------------------------------"""
        # Get config info
        config_info = self.utils.read_json(self.config_file_path)
        self.work_mode = config_info.get('work_mode', 'rgbd')
        self.rgb_w, self.rgb_h = 1280, 1080
        if self.work_mode == 'rgbd':
            self.uvc_buf_w, self.uvc_buf_h = 1280, 1080
            self.disp_w, self.disp_h = 640, 352
        elif self.work_mode == '2rgb':
            self.uvc_buf_w, self.uvc_buf_h = 1080, 960  # 1280 * 1.5 // 2
            self.disp_w, self.disp_h = 640, 352
        elif self.work_mode == 'rgbd-debug':
            self.uvc_buf_w, self.uvc_buf_h = 1080, 3200
            self.disp_w, self.disp_h = 640, 352
        elif self.work_mode == '2rgb-debug':
            self.uvc_buf_w, self.uvc_buf_h = 1080, 2240
            self.disp_w, self.disp_h = 640, 352
        else:
            self.logger.log_to_file(f" 🚨 [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                                    f"Convert to RGBD mode, ['rgbd', 'rgbd-debug', '2rgb', '2rgb-debug'].")
            raise ValueError(f" 🚨 [Error] [FrameProcessor] Mode {self.work_mode} is not supported, "
                             f"Convert to RGBD mode, ['rgbd', 'rgbd-debug', '2rgb', '2rgb-debug'].")

        """-------------------------------------------------------------------------------------"""
        # Get calib info: prefer the combined ir+rgb yaml on the device, fall back
        # to the ir-only yaml when the combined one is missing.
        self.calib_yaml_path = str(PROJECT_ROOT / 'config/calibration.yaml')
        self.calib_yaml_name = f"calib_param_ir{self.disp_w}x{self.disp_h}_rgb{self.rgb_w}x{self.rgb_h}.yaml"
        self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
        if not self.utils.is_file_on_device(self.calib_yaml_evb_path):
            self.calib_yaml_name = f"calib_param_{self.disp_w}x{self.disp_h}.yaml"
            self.calib_yaml_evb_path = f'/oem/usr/share/calib_data/{self.calib_yaml_name}'
            self.logger.log_to_file(f' 🎉 [Info] Change calib file to: {self.calib_yaml_evb_path}')

        if not self.utils.pull_file_from_single_device(self.calib_yaml_evb_path, self.calib_yaml_path):
            raise FileNotFoundError(" 🚨 [Error] Check device connect and calib yaml!")

        parser = ParserCamIntrinsic(self.calib_yaml_path)
        self.calib_info = parser.get_all_camera_intrinsic()

        # uvc
        self.uvc_name = "UVC Camera"

        """-------------------------------------------------------------------------------------"""
        self.reader = None
        self.view_mode = config_info['view_mode']

        self.image_processor = FrameProcessor(calib_info=self.calib_info)
        self.image_processor.processing_rgb_finished.connect(self.on_processing_rgb_finished)
        self.image_processor.processing_dp_finished.connect(self.on_processing_dp_finished)
        self.image_processor.start()

        # UI refresh timer; val_fps is the timer interval in ms, not a frame rate.
        self.val_fps = 5
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.process_frame_fast)
        self.timer.start(self.val_fps)

        self.show()

    def setup_message_box_style(self):
        """Append the shared QMessageBox stylesheet to the application stylesheet."""
        QApplication.instance().setStyleSheet(QApplication.instance().styleSheet() + self.message_box_style)

    def log_update(self, msg):
        """Append a message to the UI log pane and keep it scrolled to the bottom."""
        self.terminal_log.append(msg)
        scrollbar = self.terminal_log.verticalScrollBar()
        scrollbar.setValue(scrollbar.maximum())

    def button_init(self):
        """Wire up the toolbar buttons and set their initial icons."""
        self.pushButton_select_config_file.setEnabled(True)
        self.pushButton_select_config_file.clicked.connect(self.select_config_file)

        self.pushButton_start.setEnabled(True)
        self.pushButton_start.setIcon(self.icon_open_inactive)
        self.pushButton_start.clicked.connect(self.start_and_stop_process)

        self.pushButton_view_mode.setEnabled(True)
        self.pushButton_view_mode.setIcon(self.icon_3d_inactive)
        self.pushButton_view_mode.clicked.connect(self.view_mode_change)

        self.pushButton_lock.setEnabled(True)
        self.pushButton_lock.setIcon(self.icon_lock_inactive)
        self.pushButton_lock.clicked.connect(self.lock_and_unlock_view)

        self.pushButton_download.setEnabled(True)
        self.pushButton_download.clicked.connect(self.download_image_and_pointcloud)

    def linedit_init(self):
        """Initialize the config-path line edit with the default path."""
        self.lineedit_config_file_path.setText(str(self.config_file_path))
        self.lineedit_config_file_path.returnPressed.connect(self.update_config_file_path)

    def textbrowser_init(self):
        """Make the log pane read-only but selectable, with a placeholder."""
        self.terminal_log.setReadOnly(True)
        self.terminal_log.setTextInteractionFlags(
            Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard
        )
        self.terminal_log.setOpenExternalLinks(True)
        self.terminal_log.setPlaceholderText(" ⏳ RMSLViewer Log terminal, waiting...")

    def view_init(self):
        """Configure rendering/update flags for both preview views."""
        self.view_rgb.setRenderHint(QPainter.SmoothPixmapTransform)
        self.view_rgb.setRenderHint(QPainter.Antialiasing)
        self.view_rgb.setOptimizationFlags(QGraphicsView.DontAdjustForAntialiasing)
        self.view_rgb.setOptimizationFlags(QGraphicsView.DontSavePainterState)
        self.view_rgb.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)

        self.view_depth.setRenderHint(QPainter.SmoothPixmapTransform)
        self.view_depth.setRenderHint(QPainter.Antialiasing)
        self.view_depth.setOptimizationFlags(QGraphicsView.DontAdjustForAntialiasing)
        self.view_depth.setOptimizationFlags(QGraphicsView.DontSavePainterState)
        self.view_depth.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)

        # center visual
        self.view_rgb.centerOn(self.image_item_rgb)
        self.view_rgb.setAlignment(Qt.AlignCenter)
        self.view_depth.centerOn(self.image_item_depth)
        self.view_depth.setAlignment(Qt.AlignCenter)

    def update_config_file_path(self):
        """Accept a manually-typed config path if it exists; warn otherwise."""
        text = self.lineedit_config_file_path.text()
        if os.path.exists(text):
            self.config_file_path = text
        else:
            self.logger.log(f" ⚠️ [Warn] Config file(*.json): {text} not exist")
            QMessageBox.warning(self, 'Warning', f'Config file: {text} does not exist!')

    def select_config_file(self):
        """Pick a config.json via a file dialog; reset the line edit on failure."""
        config_json_path, _ = QFileDialog.getOpenFileName(self,
                                                          "Select config file(config.json)",
                                                          ".",
                                                          "Config files(*.json);;all files (*.*)")
        if not config_json_path or not os.path.exists(config_json_path):
            self.logger.log(f" 🚨 [Error] File path: {config_json_path} not exists")
            QMessageBox.warning(self,
                                "File Not Found",
                                f"File path: {config_json_path} not exists")
            # BUGFIX: was self.lineedit_iq_file_path, which is never created on
            # this class (only lineedit_config_file_path exists) -> AttributeError.
            self.lineedit_config_file_path.setText(str(PROJECT_ROOT / 'config.json'))
        else:
            self.config_file_path = config_json_path
            self.logger.log(f' 🎉 [Info] Success select config file path: {config_json_path}')

    def start_and_stop_process(self):
        """Toggle the UVC capture thread and update the start/stop icon."""
        if not self.is_started:
            # update icon
            self.pushButton_start.setIcon(self.icon_open_active)
            # load image to graphview
            self.start_process_uvc_buffer()
            self.is_started = True
            self.logger.log(f' 🎉 [Info] Started uvc thread...')
        else:
            # update icon
            self.pushButton_start.setIcon(self.icon_open_inactive)
            # stop load image to graphview
            self.stop_process_uvc_buffer()
            self.is_started = False
            self.logger.log(f' 🎉 [Info] Stopped uvc thread, waiting...')

    def start_process_uvc_buffer(self):
        """Create and start the UVC reader thread for the configured buffer size."""
        self.reader = UVCReader(
            camera_name='UVC Camera',
            width=self.uvc_buf_w,
            height=self.uvc_buf_h,
            fps=self.uvc_fps
        )
        self.reader.start()
        self.is_running = True

    def stop_process_uvc_buffer(self):
        """Stop and drop the UVC reader thread, if one is running."""
        if self.reader is not None and self.reader.is_alive():
            self.reader.stop()
        self.reader = None
        self.is_running = False

    def view_mode_change(self):
        """Toggle between 2D image and 3D point-cloud view (3D not yet supported)."""
        if not self.is_3d:
            # enable vis-pointcloud mode
            self.is_3d = True
            self.pushButton_view_mode.setIcon(self.icon_3d_active)
            QMessageBox.warning(self, 'Warning', 'Current version not support.')
            self.logger.log(f" 🎉 [Info] Convert to 3D-Points mode, Current version not support.")
        else:
            self.is_3d = False
            self.pushButton_view_mode.setIcon(self.icon_3d_inactive)
            self.logger.log(f" 🎉 [Info] Convert to 2D-Image mode.")

    def lock_and_unlock_view(self):
        """Freeze/unfreeze the preview by stopping/restarting the refresh timer.

        While locked, eventFilter() serves pixel-inspection tooltips.
        """
        if not self.is_locked:
            # close timer, not flush ui
            self.timer.stop()
            self.is_locked = True
            self.pushButton_lock.setIcon(self.icon_lock_active)
            self.logger.log(f" 🎉 [Info] Lock view.")
        else:
            self.timer.start(self.val_fps)
            self.is_locked = False
            self.pushButton_lock.setIcon(self.icon_lock_inactive)
            self.logger.log(f" 🎉 [Info] Unlock view.")

    def download_image_and_pointcloud(self):
        """Save the current (preferably timestamp-matched) images and point cloud.

        The save runs in a background QThread; the download button is disabled
        until the worker reports completion.
        """
        images_dict = {'rgb': self.img_rgb,
                       'disp': self.img_disp,
                       'depth': self.img_depth,
                       'pse_color': self.img_pse_color,
                       'ir_l': self.img_ir_l,
                       'ir_r': self.img_ir_r,
                       'rgb_l': self.img_rgb_l,
                       'rgb_r': self.img_rgb_r,
                       'diff': self.img_disp_diff}

        rgb_data, depth_data = self.matcher.find_best_match()
        if rgb_data is None or depth_data is None:
            self.logger.log_to_file(" ⚠️ [Warn] No matched data available")
        else:
            matched_rgb = rgb_data['img']
            matched_disp = depth_data['disp']
            matched_depth = depth_data['depth']
            matched_pse_color = depth_data['pse_color']
            rgb_timestamp = rgb_data['timestamp']
            depth_timestamp = depth_data['timestamp']

            time_diff = abs(depth_timestamp - rgb_timestamp)
            self.logger.log_to_file(f" 🎉 [Info] Using matched data with time difference: {time_diff}ms")

            # Prefer the timestamp-matched rgb/depth pair over the latest frames.
            images_dict = {'rgb': matched_rgb,
                           'disp': matched_disp,
                           'depth': matched_depth,
                           'pse_color': matched_pse_color,
                           'ir_l': self.img_ir_l,
                           'ir_r': self.img_ir_r,
                           'rgb_l': self.img_rgb_l,
                           'rgb_r': self.img_rgb_r,
                           'diff': self.img_disp_diff}

        self.pushButton_download.setEnabled(False)
        self.logger.log(f' 🎉 [Info] Starting Download images and pointcloud.')

        # Keep references on self so the thread/worker are not garbage-collected
        # mid-save. Named _save_* to avoid shadowing QObject.thread().
        self._save_thread = QThread()
        self._save_worker = SaveWorker(images_dict, self.calib_info)
        self._save_worker.moveToThread(self._save_thread)

        # Wire up signals and slots:
        # 1. once the thread starts, run the worker's long-running save job
        self._save_thread.started.connect(self._save_worker.download_image_and_pointcloud)

        # 2. when the worker finishes, tear everything down
        self._save_worker.finished.connect(self._save_thread.quit)  # ask the thread to exit
        self._save_worker.finished.connect(self._save_worker.deleteLater)  # dispose of the worker
        self._save_thread.finished.connect(self._save_thread.deleteLater)  # dispose of the thread
        self._save_worker.finished.connect(lambda: self.pushButton_download.setEnabled(True))  # restore the button
        self._save_worker.finished.connect(lambda: self.log_update(" 🎉 [Info] Success download images and pointcloud."))

        # 3. forward worker log messages to the UI log pane in the main thread
        self._save_worker.log_message.connect(self.log_update)

        # Start the background thread.
        self._save_thread.start()

    def process_frame_fast(self):
        """Timer slot: drain one frame from the UVC reader and queue it for processing."""
        if self.reader is not None and self.is_running:
            frame_data = self.reader.get_frame()
            # NOTE(review): truthiness test assumes get_frame() returns a
            # bytes-like object or None, not a multi-element ndarray — confirm.
            if frame_data:
                self.image_processor.add_frame(frame_data)
        else:
            time.sleep(0.01)

    @staticmethod
    def _pad_to_size(img, target_h, target_w):
        """Zero-pad a 2-D or 3-D image to (target_h, target_w), anchored top-left."""
        h, w = img.shape[:2]
        if len(img.shape) == 3:
            padded = np.zeros((target_h, target_w, img.shape[2]), dtype=img.dtype)
        else:
            padded = np.zeros((target_h, target_w), dtype=img.dtype)
        padded[:h, :w] = img
        return padded

    def on_processing_rgb_finished(self, result):
        """Slot: cache the processed RGB-side images and refresh the RGB preview.

        In the *-debug modes a 2x2 mosaic ([rgb | diff] over the two debug
        images) is shown instead of the plain RGB frame.
        """
        self.img_rgb = result.get('rgb', None)
        if self.work_mode == 'rgbd-debug':
            self.img_ir_l = result.get('ir_l', None)
            self.img_ir_r = result.get('ir_r', None)
        if self.work_mode == '2rgb-debug':
            self.img_rgb_l = result.get('rgb_l', None)
            self.img_rgb_r = result.get('rgb_r', None)
        self.timestamp_rgb = result.get('timestamp', 0)
        origin_rgb_img = result.get('origin_rgb', None)

        if self.img_rgb is not None:
            self.matcher.add_rgb_data(self.img_rgb, self.timestamp_rgb)

        if self.work_mode == 'rgbd-debug' or self.work_mode == '2rgb-debug':
            self.img_disp_diff = result.get('diff', None)
            if self.img_disp_diff is None:
                self.logger.log_to_file(" ⚠️ [Warn] Difference image not found.")
                # Fall back to a downsampled RGB so the debug mosaic stays filled.
                self.img_disp_diff = self.img_rgb[::2, ::2]

        if origin_rgb_img is not None:
            if self.work_mode == 'rgbd-debug' and self.img_ir_l is not None and self.img_ir_r is not None:
                ir_l_ds = self.img_ir_l[::2, ::2]
                ir_r_ds = self.img_ir_r[::2, ::2]
                rgb_ds = origin_rgb_img[::2, ::2]
                diff_ds = self.img_disp_diff
                max_height = max(rgb_ds.shape[0], diff_ds.shape[0], ir_l_ds.shape[0], ir_r_ds.shape[0])
                max_width = max(rgb_ds.shape[1], diff_ds.shape[1], ir_l_ds.shape[1], ir_r_ds.shape[1])

                rgb_ds = self._pad_to_size(rgb_ds, max_height, max_width)
                diff_ds = self._pad_to_size(diff_ds, max_height, max_width)
                ir_l_ds = self._pad_to_size(ir_l_ds, max_height, max_width)
                ir_r_ds = self._pad_to_size(ir_r_ds, max_height, max_width)

                top_row = np.hstack([rgb_ds, diff_ds])
                bottom_row = np.hstack([ir_l_ds, ir_r_ds])
                color_frame = np.vstack([top_row, bottom_row])
            elif self.work_mode == '2rgb-debug' and self.img_rgb_l is not None and self.img_rgb_r is not None:
                rgb_l_ds = self.img_rgb_l[::2, ::2]
                rgb_r_ds = self.img_rgb_r[::2, ::2]
                rgb_ds = origin_rgb_img[::2, ::2]
                diff_ds = self.img_disp_diff
                max_height = max(rgb_ds.shape[0], diff_ds.shape[0], rgb_l_ds.shape[0], rgb_r_ds.shape[0])
                max_width = max(rgb_ds.shape[1], diff_ds.shape[1], rgb_l_ds.shape[1], rgb_r_ds.shape[1])

                rgb_ds = self._pad_to_size(rgb_ds, max_height, max_width)
                diff_ds = self._pad_to_size(diff_ds, max_height, max_width)
                rgb_l_ds = self._pad_to_size(rgb_l_ds, max_height, max_width)
                rgb_r_ds = self._pad_to_size(rgb_r_ds, max_height, max_width)

                top_row = np.hstack([rgb_ds, diff_ds])
                bottom_row = np.hstack([rgb_l_ds, rgb_r_ds])
                color_frame = np.vstack([top_row, bottom_row])
            else:
                color_frame = origin_rgb_img

            # NOTE(review): QImage is built without an explicit bytesPerLine;
            # this assumes color_frame is C-contiguous with stride == width*3.
            color_img = QImage(color_frame,
                               color_frame.shape[1],
                               color_frame.shape[0],
                               QImage.Format_RGB888)
            if not color_img.isNull():
                pixmap = QPixmap.fromImage(color_img)
                self.image_item_rgb.setPixmap(pixmap)
                self.image_item_rgb.setZValue(0)
                self.show_rgb_frame()

    def on_processing_dp_finished(self, result):
        """Slot: cache the disparity/depth results and refresh the depth preview."""
        self.img_disp = result.get('disp', None)
        self.img_depth = result.get('depth', None)
        self.img_pse_color = result.get('pse_color', None)
        self.timestamp_disp = result.get('timestamp', 0)

        if self.img_disp is not None:
            self.matcher.add_depth_data(self.img_disp,
                                        self.img_depth,
                                        self.timestamp_disp,
                                        pse_color_depth_img=self.img_pse_color)

        if self.img_pse_color is not None:
            img_pse_color = QImage(self.img_pse_color,
                                   self.img_pse_color.shape[1],
                                   self.img_pse_color.shape[0],
                                   QImage.Format_RGB888)

            if not img_pse_color.isNull():
                pixmap = QPixmap.fromImage(img_pse_color)
                self.image_item_depth.setPixmap(pixmap)
                self.image_item_depth.setZValue(0)
                self.show_depth_frame()

    def show_depth_frame(self):
        """Attach the depth item to its scene (once) and fit it into the view."""
        if not self.view_depth.scene():
            self.scene_depth.addItem(self.image_item_depth)
        self.view_depth.setScene(self.scene_depth)
        self.view_depth.fitInView(self.image_item_depth, Qt.KeepAspectRatio)

    def show_rgb_frame(self):
        """Attach the RGB item to its scene (once) and fit it into the view."""
        if not self.view_rgb.scene():
            self.scene_rgb.addItem(self.image_item_rgb)
        self.view_rgb.setScene(self.scene_rgb)
        self.view_rgb.fitInView(self.image_item_rgb, Qt.KeepAspectRatio)

    def resizeEvent(self, event):
        """Keep both previews fitted when the window is resized."""
        super().resizeEvent(event)
        if not self.image_item_rgb.pixmap().isNull():
            self.view_rgb.fitInView(self.image_item_rgb, Qt.KeepAspectRatio)
        if not self.image_item_depth.pixmap().isNull():
            self.view_depth.fitInView(self.image_item_depth, Qt.KeepAspectRatio)

    def closeEvent(self, event):
        """Shut down the capture and processing threads before closing."""
        self.stop_process_uvc_buffer()
        self.image_processor.stop()
        super().closeEvent(event)
        event.accept()

    def eventFilter(self, obj, event):
        """Serve pixel-inspection tooltips on left-click, only while the view is
        locked (a live preview would repaint under the cursor)."""
        if not self.is_locked:
            return super().eventFilter(obj, event)

        if event.type() == QEvent.MouseButtonPress and event.button() == Qt.LeftButton:
            if obj is self.view_rgb.viewport():
                if self._show_image_tooltip(self.view_rgb, self.image_item_rgb, event):
                    return True
            elif obj is self.view_depth.viewport():
                if self._show_image_tooltip(self.view_depth, self.image_item_depth, event, en_distance=True):
                    return True

        return super().eventFilter(obj, event)

    def _show_image_tooltip(self, view: QGraphicsView, item: QGraphicsPixmapItem, event, en_distance: bool = False):
        """Show a tooltip with the clicked pixel's coordinates and RGB value.

        When *en_distance* is True the depth (mm) at the corresponding
        position of ``self.img_depth`` is appended.

        Returns True if a tooltip was shown for a point on the image.
        """
        if item is None:
            return False

        pix = item.pixmap()
        if pix.isNull():
            return False

        vp_pos = event.pos()  # QPoint (Qt5)
        # 1) Hit test: only handle clicks that land on the image item,
        #    so blank margins around it are ignored.
        hit_item = view.itemAt(vp_pos)
        if hit_item is not item:
            return False

        # 2) Map viewport -> scene -> item coordinates.
        scene_pos = view.mapToScene(vp_pos)
        item_pos = item.mapFromScene(scene_pos)  # QPointF in item-local (unscaled logical) coords

        # 3) Bounds-check with floats first, so int truncation cannot turn a
        #    small negative coordinate into 0.
        dpr = pix.devicePixelRatio()  # HiDPI
        img_w = pix.width() / dpr
        img_h = pix.height() / dpr

        fx = item_pos.x()
        fy = item_pos.y()
        if not (0.0 <= fx < img_w and 0.0 <= fy < img_h):
            return False

        # 4) Convert to integer pixel indices (floor).
        ix = int(fx * dpr)
        iy = int(fy * dpr)

        img = pix.toImage()
        # Qt5 usually provides pixelColor; fall back to img.pixel + QColor otherwise.
        if hasattr(img, "pixelColor"):
            c = img.pixelColor(ix, iy)
        else:
            rgb = img.pixel(ix, iy)
            c = QColor(rgb)

        # 5) Use the viewport as the widget & coordinate base so the tooltip
        #    stays anchored correctly after window scaling.
        gpos = view.viewport().mapToGlobal(vp_pos)
        if en_distance:
            if self.img_depth is None or self.img_depth.size == 0:
                QToolTip.showText(gpos,
                                  f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})",
                                  view.viewport())
                return False

            depth_h, depth_w = self.img_depth.shape[:2]

            # Scale the click position from displayed-pixmap space into the
            # depth image's resolution.
            normalized_x = fx / img_w
            normalized_y = fy / img_h

            real_pos_x = int(normalized_x * depth_w)
            real_pos_y = int(normalized_y * depth_h)

            real_pos_x = min(real_pos_x, depth_w - 1)
            real_pos_y = min(real_pos_y, depth_h - 1)

            distance = self.img_depth[real_pos_y, real_pos_x]

            QToolTip.showText(gpos,
                              f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})  Distance: {distance:.2f} mm",
                              view.viewport())
        else:
            QToolTip.showText(gpos,
                              f"({int(fx)}, {int(fy)})  RGB: ({c.red()}, {c.green()}, {c.blue()})",
                              view.viewport())
        return True


def main():
    """Application entry point: enable HiDPI support, build the Qt app, run the viewer."""
    # HiDPI attributes must be set before the QApplication is constructed.
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)

    # The rounding policy only exists on Qt >= 5.14; guard for older builds.
    if hasattr(Qt, 'HighDpiScaleFactorRoundingPolicy'):
        QApplication.setHighDpiScaleFactorRoundingPolicy(
            Qt.HighDpiScaleFactorRoundingPolicy.PassThrough)

    app = QApplication(sys.argv)
    tooltip_style = """
    QToolTip {
        background-color: rgb(230, 230, 230);
        color: black;
        border: None;
        font-family: "YaHei Consolas Hybrid";
        font-size: 8pt;
    }
    """
    app.setStyleSheet(tooltip_style)
    viewer = RMSLViewer()  # keep a reference so the window is not garbage-collected
    app.exec_()


if __name__ == '__main__':
    # Script entry point; prevents the GUI from launching on import.
    main()
