import os
import shutil
import sys
from queue import Empty
from threading import Thread, Lock
import cv2
import numpy as np
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QThread, pyqtSignal, QTimer
from multiprocessing import Queue
from pathlib import Path
import json
import logging
from logging.handlers import RotatingFileHandler
from datetime import datetime
from openvino.runtime import Core, AsyncInferQueue
from threading import Lock

# ==================== 按日期+序号滚动日志 ====================
class DateRotatingFileHandler(RotatingFileHandler):
    """Size-based rotating file handler that archives rollovers as
    ``<stem>_YYYY-MM-DD[-N].log`` instead of the default ``.1``/``.2`` suffixes.

    ``backupCount`` bounds how many files are kept on disk: the live log plus
    the newest ``backupCount - 1`` dated archives (oldest by mtime are pruned).
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None, delay=False):
        super().__init__(filename, mode='a', maxBytes=maxBytes, backupCount=backupCount, encoding=encoding, delay=delay)
        self.base_dir = os.path.dirname(filename) or "."
        self.base_name = os.path.basename(filename)
        # Filename stem without extension, e.g. "Record" for "Record.log".
        self.stem = os.path.splitext(self.base_name)[0]

    def doRollover(self):
        """Move the active log to a dated archive file and reopen a fresh log."""
        if self.stream:
            self.stream.close()
            self.stream = None
        today_str = datetime.now().strftime("%Y-%m-%d")
        target_base = f"{self.stem}_{today_str}"
        # Probe for the first free name: stem_DATE.log, stem_DATE-1.log, ...
        next_index = 0
        target_file = os.path.join(self.base_dir, f"{target_base}.log")
        while os.path.exists(target_file):
            next_index += 1
            target_file = os.path.join(self.base_dir, f"{target_base}-{next_index}.log")
        try:
            if os.path.exists(self.baseFilename):
                shutil.move(self.baseFilename, target_file)
        except Exception:
            # NOTE(review): passing None as the record; logging's default
            # handleError formats the record only on a best-effort basis,
            # but a real record would be safer — confirm if ever changed.
            self.handleError(None)
        if not self.delay:
            self.stream = self._open()
        if self.backupCount > 0:
            import re
            # Match only our own dated archives, never the live log file.
            date_pattern = re.compile(rf"^{re.escape(self.stem)}_\d{{4}}-\d{{2}}-\d{{2}}(-\d+)?\.log$")
            archives = [f for f in os.listdir(self.base_dir) if date_pattern.match(f)]
            # Newest first by mtime; keep backupCount - 1 archives (+ live file).
            archives.sort(key=lambda x: os.path.getmtime(os.path.join(self.base_dir, x)), reverse=True)
            for old_file in archives[self.backupCount - 1:]:
                try:
                    os.remove(os.path.join(self.base_dir, old_file))
                except Exception:
                    # Best effort: a locked or vanished file must not break logging.
                    pass


# ==================== 异步日志处理器 ====================
class AsyncLogHandler(logging.Handler):
    """Forward log records to a background thread so that emitting a record
    never blocks the calling thread (e.g. the GUI thread).

    Records are pushed onto a bounded queue; when the queue is full, records
    are silently dropped rather than blocking the caller.
    """

    def __init__(self, handler, queue_size=1000):
        super().__init__()
        self.handler = handler  # underlying handler that actually writes (e.g. FileHandler)
        self.queue = Queue(maxsize=queue_size)
        self.running = True
        self.thread = Thread(target=self._worker, name="AsyncLogWriter", daemon=True)
        self.start()

    def start(self):
        """Start the background writer thread."""
        self.thread.start()

    def _worker(self):
        """Background loop: pop records from the queue and hand them to the
        wrapped handler. Exits on a ``None`` sentinel or when ``running`` flips."""
        while self.running:
            try:
                record = self.queue.get(timeout=0.5)  # timeout so shutdown is noticed
                if record is None:  # shutdown sentinel
                    break
                self.handler.emit(record)
            except Empty:
                continue  # just loop and re-check self.running
            except Exception as e:
                # print() instead of the logger to avoid logging recursion.
                print(f"AsyncLogHandler 写入异常: {e}")
                continue

    def emit(self, record):
        """Producer side: enqueue the record without blocking."""
        try:
            self.queue.put_nowait(record)
        except Exception:
            # Queue full (or closed): deliberately drop rather than block.
            pass

    def close(self):
        """Stop the worker and release resources.

        Fix: join the worker thread *before* closing the underlying handler;
        previously the handler could be closed while the worker was still
        emitting an in-flight record to it.
        """
        if self.running:
            self.running = False
            try:
                self.queue.put(None)  # wake the worker with the sentinel
            except Exception:
                pass
            self.thread.join(timeout=2.0)
            self.handler.close()
            super().close()


# ==================== 初始化异步日志 ====================
def setup_logging(log_dir="logs", log_filename="Record.log", level=logging.INFO, max_bytes=2 * 1024 * 1024, backup_count=3):
    """Build the application logger with an async, date-archiving file handler.

    Args:
        log_dir: directory for log files (created if missing).
        log_filename: name of the live log file.
        level: logging level for the returned logger.
        max_bytes: size threshold that triggers a rollover.
        backup_count: max dated archives kept by DateRotatingFileHandler.

    Returns:
        The configured ``logging.Logger`` named "InferenceSpeed".
    """
    os.makedirs(log_dir, exist_ok=True)  # `os` is already imported at module level
    log_path = os.path.join(log_dir, log_filename)

    logger = logging.getLogger("InferenceSpeed")
    # Re-running setup must not stack duplicate handlers.
    if logger.hasHandlers():
        logger.handlers.clear()
    logger.setLevel(level)

    # 1. File handler that archives rollovers under dated names.
    file_handler = DateRotatingFileHandler(
        log_path,
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding='utf-8'
    )
    formatter = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    file_handler.setFormatter(formatter)

    # 2. Wrap it so writes happen off the calling thread.
    async_handler = AsyncLogHandler(file_handler, queue_size=1000)

    # 3. Attach to the logger.
    logger.addHandler(async_handler)

    return logger


# Global module-wide logger instance (async file logging; see setup_logging()).
SPEED_LOGGER = setup_logging()


# ==================== 加载配置文件 ====================
def load_config():
    """Load ``config-pt-onnx-openVINO.json`` from the app directory and
    return a normalized settings dict.

    Exits the process (``sys.exit``) with a message if the config file is
    missing/invalid or the model file cannot be found.
    """
    # When frozen (PyInstaller), resources live next to the executable.
    if getattr(sys, 'frozen', False):
        base_dir = Path(sys.executable).parent
    else:
        base_dir = Path(__file__).parent
    config_path = base_dir / "config-pt-onnx-openVINO.json"
    SPEED_LOGGER.info(f"配置文件config_path路径: {config_path}")
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError:
        sys.exit(f"错误：配置文件格式不正确")

    rtsp_urls = [cam['url'] for cam in config["cameras"]]

    model_path = Path(config["model_path"])
    # Fix: if a directory is configured, resolve the model.xml inside it.
    # (The original only looked for model_path/"model.xml" when model_path
    # itself did NOT exist — a branch that can never succeed.)
    if model_path.is_dir():
        model_path = model_path / "model.xml"
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]}),
        "classes_to_show": config["classes_to_show"],
        "image_size": config["image_size"],
        "frame_interval": config.get("frame_interval", 2),
    }


# ==================== 图像保存模块 ====================
class RawFrameSaver:
    """Throttled, asynchronous saver for raw (unannotated) frames.

    Note: the ``save_dir`` parameter is kept for backward compatibility but
    the directory actually used is ``config["save_dirs"]["raw_frames"]``.
    """

    def __init__(self, save_dir="raw_frames"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        # parents=True so a nested/missing parent directory doesn't raise.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.min_interval = 1.0   # minimum seconds between two saves
        self.last_save_time = 0
        self.lock = Lock()

    def add_frame(self, frame, timestamp_str):
        """Save ``frame`` as ``<timestamp_str>.jpg`` if the throttle allows it."""
        current_time = time.time()
        with self.lock:
            if current_time - self.last_save_time >= self.min_interval:
                filename = f"{timestamp_str}.jpg"
                save_path = self.save_dir / filename
                # Write on a worker thread so disk I/O never blocks the caller.
                Thread(target=cv2.imwrite, args=(str(save_path), frame), daemon=True).start()
                self.last_save_time = current_time
                # Fix: log the actual path (was the literal "(unknown)").
                SPEED_LOGGER.debug(f"💾 已保存原始帧: {save_path}")


class FrameSaver:
    """Throttled, asynchronous saver for full annotated frames.

    Note: the ``save_dir`` parameter is kept for backward compatibility but
    the directory actually used is ``config["save_dirs"]["full_frames"]``.
    """

    def __init__(self, save_dir="full_frames"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["full_frames"])
        # parents=True so a nested/missing parent directory doesn't raise.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.min_interval = 1.0   # minimum seconds between two saves
        self.last_save_time = 0
        self.lock = Lock()

    def save_full_frame(self, frame, timestamp_str):
        """Save the annotated ``frame`` as ``<timestamp_str>.jpg``, throttled."""
        current_time = time.time()
        with self.lock:
            if current_time - self.last_save_time >= self.min_interval:
                filename = f"{timestamp_str}.jpg"
                save_path = self.save_dir / filename
                # Write on a worker thread so disk I/O never blocks the caller.
                Thread(target=cv2.imwrite, args=(str(save_path), frame), daemon=True).start()
                self.last_save_time = current_time
                # Fix: log the actual path (was the literal "(unknown)").
                SPEED_LOGGER.debug(f"📸 已保存完整带框图: {save_path}")


class ImageSaver:
    """Throttled, asynchronous saver for cropped detection patches."""

    def __init__(self, save_dir="detected_objects"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        # parents=True so a nested/missing parent directory doesn't raise.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.frame_counter = 0   # monotonically increasing filename suffix
        self.min_interval = 0.5  # minimum seconds between two crops
        self.last_save_time = 0
        self.lock = Lock()

    def save_cropped_image(self, frame, box, class_name, conf):
        """Crop ``box`` out of ``frame`` and save it asynchronously.

        Fix: the counter and throttle timestamp are now updated *inside the
        lock*, before the worker thread starts. Previously both were mutated
        from the worker thread without synchronization, which raced and let
        multiple crops through within a single throttle window.
        """
        current_time = time.time()
        with self.lock:
            if current_time - self.last_save_time < self.min_interval:
                return
            x1, y1, x2, y2 = map(int, box)
            cropped = frame[y1:y2, x1:x2]
            if cropped.size == 0:
                return
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
            save_path = self.save_dir / filename
            self.frame_counter += 1
            self.last_save_time = current_time

        # Encode + write off-thread so disk I/O never blocks the caller.
        def _save():
            # imencode + tofile also handles non-ASCII paths on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 95])
            if ret:
                buffer.tofile(str(save_path))
                # Fix: log the actual path (was the literal "(unknown)").
                SPEED_LOGGER.debug(f"✂️ 已裁剪保存: {save_path}")

        Thread(target=_save, daemon=True).start()


# ==================== 推理线程 ====================
class InferenceWorker(QThread):
    """OpenVINO asynchronous inference thread.

    Pulls frames from ``frame_queue``, submits them to an ``AsyncInferQueue``
    and, from the inference-completion callback, emits the annotated frame
    via the ``result_ready`` signal. Supports both models with built-in NMS
    and raw YOLO-style output (in which case NMS is applied with
    ``cv2.dnn.NMSBoxes``).
    """

    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes, class_names=None, image_size=320, num_requests=2):
        super().__init__()
        self.model_path = Path(model_path)
        self.center_ratio = center_ratio  # fractional size of the central detection zone
        self.nested_classes_config = nested_classes
        self.class_names = class_names or ["hel", "nohel", "wheel"]
        self.expected_num_classes = len(self.class_names)
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        self.frame_queue = Queue(maxsize=2)  # tiny queue: prefer dropping frames to lagging
        self.min_interval = 1.0              # min seconds between triggered image saves
        self.last_save_time = 0
        self.unified_imgsz = image_size      # square network input size
        self.running = True
        self.num_requests = num_requests
        self.lock = Lock()

        # OpenVINO runtime objects, populated by _load_model().
        self.core = Core()
        self.compiled_model = None
        self.async_queue = None
        self.input_key = None
        self.output_key = None

        # NOTE: the unused frame_info/info_idx ring buffer was removed —
        # per-frame metadata travels through start_async() userdata instead.

        # Load and compile the model.
        self._load_model()

    def _empty_result(self):
        """Return a result list meaning 'no detections' (keeps result shape uniform)."""
        return [{
            'boxes': np.empty((0, 4)),
            'confs': np.empty((0,)),
            'cls_ids': np.empty((0,), dtype=int),
            'names': dict(enumerate(self.class_names))
        }]

    def _load_model(self):
        """Read, compile and wrap the IR model in an AsyncInferQueue.

        Raises:
            FileNotFoundError: the .xml/.bin pair is missing.
            ValueError: raw-output channel count doesn't match the class list.
        """
        # Accept either a direct .xml path or a directory containing model.xml.
        xml_path = self.model_path if self.model_path.suffix == ".xml" else self.model_path / "model.xml"
        bin_path = xml_path.with_suffix(".bin")

        if not xml_path.exists():
            raise FileNotFoundError(f"未找到模型文件: {xml_path}")
        if not bin_path.exists():
            raise FileNotFoundError(f"未找到权重文件: {bin_path}")

        model = self.core.read_model(model=str(xml_path))

        # --- performance tuning for a low-end CPU (i5-7200U: 2C/4T) ---
        config = {
            "PERFORMANCE_HINT": "THROUGHPUT",           # multi-stream throughput mode
            "INFERENCE_NUM_THREADS": "4",               # match the 4 hardware threads
            "ENABLE_HYPER_THREADING": "YES",
        }

        self.compiled_model = self.core.compile_model(model, "CPU", config)
        self.input_key = self.compiled_model.input(0)
        self.output_key = self.compiled_model.output(0)

        out_shape = self.output_key.partial_shape
        shape_list = [dim.get_length() for dim in out_shape]

        SPEED_LOGGER.info(f"🔧 模型输出形状: {shape_list}")

        # Heuristic: [1, N>=100, fields] means NMS is baked into the model.
        if len(shape_list) == 3 and shape_list[0] == 1 and shape_list[1] >= 100:
            self.is_nms_model = True
            SPEED_LOGGER.info(f"✅ 已加载【带NMS】的模型，最大输出框数: {shape_list[1]}, 字段数: {shape_list[2]}")
        else:
            C = out_shape[1].get_length()
            num_classes = C - 4  # channels = 4 box coords + one score per class
            if num_classes != self.expected_num_classes:
                raise ValueError(f"模型输出维度异常：预期 {4 + self.expected_num_classes}，实际 {C}")
            self.is_nms_model = False
            SPEED_LOGGER.info(f"⚠️ 已加载【原始输出】模型，通道数 C={C}")

        # Async infer queue with completion callback (recommended OpenVINO pattern).
        self.async_queue = AsyncInferQueue(self.compiled_model, self.num_requests)
        self.async_queue.set_callback(self._inference_callback)

    def _preprocess(self, frame):
        """Letterbox ``frame`` to the square network size and build the NCHW tensor.

        Returns:
            (input_tensor, info) where ``info`` carries the padding/scale
            metadata needed to map boxes back to the original frame.
        """
        h, w = frame.shape[:2]
        scale = self.unified_imgsz / max(h, w)
        new_h, new_w = int(h * scale), int(w * scale)

        resized_frame = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
        # Symmetric zero padding to reach unified_imgsz on both axes.
        pad_h = self.unified_imgsz - new_h
        pad_w = self.unified_imgsz - new_w
        top = pad_h // 2
        bottom = pad_h - top
        left = pad_w // 2
        right = pad_w - left

        padded_frame = cv2.copyMakeBorder(resized_frame, top, bottom, left, right,
                                          borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
        rgb_frame = cv2.cvtColor(padded_frame, cv2.COLOR_BGR2RGB)
        # HWC uint8 -> NCHW float32 in [0, 1].
        input_tensor = np.expand_dims(rgb_frame.transpose(2, 0, 1), axis=0).astype(np.float32) / 255.0
        return input_tensor, {
            'top': top,
            'left': left,
            'orig_size': (h, w),
            'new_size': (new_h, new_w),
            'raw_frame': frame.copy(),
            'timestamp': time.time()
        }

    def _inference_callback(self, infer_request, userdata):
        """Invoked by OpenVINO when one inference request completes.

        Decodes the output (with or without built-in NMS), rescales boxes to
        the original frame, draws annotations and emits ``result_ready``.
        """
        try:
            output = infer_request.get_output_tensor(0).data
            info = userdata['info']
            pre_time = userdata['pre_time']

            cb_start = time.time()
            # Includes queueing + preprocessing + inference (measured from submit).
            inf_time = cb_start - info['timestamp']

            if self.is_nms_model:
                result = np.ascontiguousarray(output).astype(np.float32)
                if result.ndim > 2:
                    result = result[0]  # drop batch dim -> (N, fields)
                boxes_640 = result[:, :4]
                confs = result[:, 4]
                cls_ids = result[:, 5].astype(int)
                valid_mask = confs > 0.5

                if not valid_mask.any():
                    post_time = time.time() - cb_start
                    total_time = time.time() - info['timestamp']
                    SPEED_LOGGER.debug(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                                      f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: 0")
                    annotated = self.process_results(info['raw_frame'], self._empty_result())
                    self.result_ready.emit(annotated)
                    return

                boxes_filtered = boxes_640[valid_mask]
                boxes_original = self._rescale_boxes(
                    boxes_filtered, info['new_size'], (info['top'], info['left']), info['orig_size']
                )
                res = [{
                    'boxes': boxes_original,
                    'confs': confs[valid_mask],
                    'cls_ids': cls_ids[valid_mask],
                    'names': dict(enumerate(self.class_names))
                }]

            else:
                # Raw YOLO-style output. Fix: squeeze a leading batch axis if
                # present — OpenVINO typically returns (1, C, N), and the
                # original 2-way unpack would raise on a 3-D tensor.
                if output.ndim == 3:
                    output = output[0]
                C, N = output.shape
                output = output.transpose(1, 0)  # -> (N, C)
                box_cxcywh = output[:, :4]
                scores = output[:, 4:]
                max_conf = np.max(scores, axis=1)
                cls_ids = np.argmax(scores, axis=1)
                conf_mask = max_conf >= 0.5

                if not conf_mask.any():
                    post_time = time.time() - cb_start
                    total_time = time.time() - info['timestamp']
                    SPEED_LOGGER.debug(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                                      f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: 0")
                    annotated = self.process_results(info['raw_frame'], self._empty_result())
                    self.result_ready.emit(annotated)
                    return

                box_xyxy = self._cxcywh_to_xyxy(box_cxcywh[conf_mask])
                confs = max_conf[conf_mask]
                cls_ids = cls_ids[conf_mask]  # fix: dropped the dead 'conf_mask_cls_ids' alias

                indices = cv2.dnn.NMSBoxes(
                    boxes=box_xyxy.tolist(),
                    scores=confs.tolist(),
                    score_threshold=0.5,
                    nms_threshold=0.45,
                    eta=1.0,
                    top_k=300
                )
                indices = indices.flatten() if len(indices) > 0 else []

                final_boxes = box_xyxy[indices]
                final_confs = confs[indices]
                final_cls_ids = cls_ids[indices]

                final_boxes_orig = self._rescale_boxes(
                    final_boxes, info['new_size'], (info['top'], info['left']), info['orig_size']
                )

                res = [{
                    'boxes': final_boxes_orig,
                    'confs': final_confs,
                    'cls_ids': final_cls_ids,
                    'names': dict(enumerate(self.class_names))
                }]

            post_time = time.time() - cb_start
            total_time = time.time() - info['timestamp']
            num_dets = len(res[0]['boxes'])

            SPEED_LOGGER.debug(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                              f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: {num_dets}")

            # Draw annotations and notify listeners.
            annotated_frame = self.process_results(info['raw_frame'], res)
            self.result_ready.emit(annotated_frame)

        except Exception as e:
            SPEED_LOGGER.error(f"❌ 回调处理失败: {str(e)}", exc_info=True)

    def run(self):
        """Thread main: pull frames, preprocess, submit to the async queue."""
        while self.running:
            try:
                frame = self.frame_queue.get(timeout=1.0)

                # Preprocess (timed separately from inference).
                start_pre = time.time()
                input_tensor, info = self._preprocess(frame)
                pre_time = time.time() - start_pre

                # Submit; the callback handles the result.
                try:
                    self.async_queue.start_async({self.input_key: input_tensor}, {'info': info, 'pre_time': pre_time})
                except RuntimeError as e:
                    if "Queue is full" in str(e):
                        SPEED_LOGGER.warning("跳过帧：异步队列满")
                    else:
                        SPEED_LOGGER.error(f"推理提交失败: {e}")

            except Empty:
                continue  # no frame within timeout; re-check self.running
            except Exception as e:
                SPEED_LOGGER.error(f"InferenceWorker 错误: {e}", exc_info=True)

    def stop(self):
        """Request shutdown, drain pending inferences, then join the thread."""
        self.running = False
        if hasattr(self, 'async_queue') and self.async_queue:
            self.async_queue.wait_all()  # let in-flight requests finish
        self.wait(2000)

    # ------------------- helpers -------------------
    def _cxcywh_to_xyxy(self, boxes):
        """Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2)."""
        x_c = boxes[:, 0]
        y_c = boxes[:, 1]
        w = boxes[:, 2]
        h = boxes[:, 3]
        x1 = x_c - w / 2
        y1 = y_c - h / 2
        x2 = x_c + w / 2
        y2 = y_c + h / 2
        return np.stack([x1, y1, x2, y2], axis=-1)

    def _rescale_boxes(self, boxes, resized_shape, padding, original_shape):
        """Map letterboxed-image boxes back to original-frame coordinates."""
        new_h, new_w = resized_shape
        pad_top, pad_left = padding
        orig_h, orig_w = original_shape

        boxes_copy = boxes.copy()
        # Undo padding, then undo the resize scale, then clip to the frame.
        boxes_copy[:, [0, 2]] -= pad_left
        boxes_copy[:, [1, 3]] -= pad_top
        boxes_copy[:, [0, 2]] *= orig_w / new_w
        boxes_copy[:, [1, 3]] *= orig_h / new_h
        boxes_copy[:, :4] = np.clip(boxes_copy[:, :4], 0, [orig_w, orig_h, orig_w, orig_h])
        return boxes_copy

    def process_results(self, frame, results):
        """Draw the central zone and detections whose centers fall inside it;
        trigger throttled saving of raw + annotated frames on any hit.

        Returns the annotated frame (a copy of the input).
        """
        frame = np.ascontiguousarray(frame.copy())
        h, w = frame.shape[:2]
        # Scale text with frame height so labels stay readable at any resolution.
        font_scale = max(0.6, h / 720.0 * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))
        raw_frame = frame.copy()
        # Central detection zone, center_ratio of the frame in each dimension.
        cx1 = int(w * (0.5 - self.center_ratio / 2))
        cy1 = int(h * (0.5 - self.center_ratio / 2))
        cx2 = int(w * (0.5 + self.center_ratio / 2))
        cy2 = int(h * (0.5 + self.center_ratio / 2))
        cv2.rectangle(frame, (cx1, cy1), (cx2, cy2), (0, 255, 255), 2)

        any_target_detected = False
        detected_targets = []

        class_colors = {
            "wheel": (0, 255, 0),
            "hel": (255, 0, 0),
            "nohel": (0, 0, 255),
        }

        # Keep only detections whose box center lies inside the central zone.
        for result in results:
            boxes = result.get('boxes', [])
            confs = result.get('confs', [])
            cls_ids = result.get('cls_ids', [])
            names = result.get('names', {})

            for box, conf, cls_id in zip(boxes, confs, cls_ids):
                class_name = names.get(cls_id, f"class{cls_id}")
                xc = int((box[0] + box[2]) / 2)
                yc = int((box[1] + box[3]) / 2)

                if cx1 <= xc <= cx2 and cy1 <= yc <= cy2:
                    detected_targets.append({
                        "box": box.copy(),
                        "conf": conf,
                        "class_name": class_name
                    })
                    any_target_detected = True

        for target in detected_targets:
            x1, y1, x2, y2 = map(int, target["box"])
            # Crop saving intentionally disabled for now to reduce CPU load
            # (note: self.saver / ImageSaver is not instantiated in this class):
            # self.saver.save_cropped_image(raw_frame, target["box"], target["class_name"], target["conf"])
            color = class_colors.get(target["class_name"], (128, 128, 128))
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            label = f"{target['class_name']} {target['conf']:.2f}"
            (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
            # Semi-transparent label background.
            overlay = frame.copy()
            cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), color, -1)
            alpha = 0.4
            frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
            cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        # Throttled persistence of both the raw and the annotated frame.
        current_time = time.time()
        if any_target_detected and current_time - self.last_save_time >= self.min_interval:
            ts_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + ts_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + ts_str)
            self.last_save_time = current_time
            SPEED_LOGGER.debug(f"✅ 已触发图像保存: {ts_str}")

        return frame


# ==================== 视频采集与显示线程 ====================
class VideoThread(QThread):
    """Captures one RTSP stream, feeds every Nth frame to a dedicated
    InferenceWorker and re-emits annotated frames as QImage."""

    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        super().__init__()
        self.rtsp_url = rtsp_url
        self.frame_interval = frame_interval  # process every Nth frame
        self.cap = None
        self.frame_count = 0
        self.running = False

        # One inference worker per camera thread.
        self.inference_worker = InferenceWorker(
            model_path=model_path,
            center_ratio=center_ratio,
            nested_classes=nested_classes,
            class_names=class_names,
            image_size=image_size
        )
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()

    def run(self):
        """Thread main: paced capture loop with reconnect-on-failure."""
        self.running = True

        # Open the stream with the FFMPEG backend explicitly.
        self.cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        if not self.cap.isOpened():
            SPEED_LOGGER.error(f"无法打开 RTSP 流: {self.rtsp_url}")
            return

        # Tune for low latency.
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # smallest internal buffer
        self.cap.set(cv2.CAP_PROP_FPS, 25)
        cv2.ocl.setUseOpenCL(False)  # some drivers hang with OpenCL enabled

        period = 1.0 / 30  # target ~30 FPS polling
        last_time = time.time()

        while self.running:
            current_time = time.time()
            # Pace the loop so we don't spin faster than the target rate.
            if current_time - last_time < period:
                time.sleep(0.001)
                continue
            last_time = current_time

            ret, frame = self.cap.read()
            if not ret:
                SPEED_LOGGER.warning("RTSP 流读取失败，尝试重连...")
                self.cap.release()
                time.sleep(1)
                # Fix: reconnect with the same FFMPEG backend as the initial
                # open (the original reopened with the default backend).
                self.cap.open(self.rtsp_url, cv2.CAP_FFMPEG)
                continue

            self.frame_count += 1
            # Forward only every Nth frame to the inference worker.
            if self.frame_count % self.frame_interval == 0:
                queue_size = self.inference_worker.frame_queue.qsize()
                if queue_size < 2:  # avoid backlog buildup
                    try:
                        self.inference_worker.frame_queue.put(frame.copy(), block=False)
                    except Exception:
                        SPEED_LOGGER.debug(f"跳过帧：队列已满 ({queue_size})")

        self.cleanup()

    def on_inference_done(self, annotated_frame):
        """Slot for InferenceWorker.result_ready: convert BGR ndarray to QImage."""
        try:
            rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape
            bytes_per_line = ch * w
            # Fix: .copy() makes the QImage own its pixel buffer. Without it
            # the QImage references `rgb`'s numpy memory, which is freed when
            # this function returns while the queued signal is still pending.
            qt_image = QImage(rgb.data, w, h, bytes_per_line, QImage.Format_RGB888).copy()
            if not qt_image.isNull():
                # Deliver to the GUI thread for display.
                self.update_frame.emit(qt_image)
            else:
                SPEED_LOGGER.warning("⚠️ 生成 QImage 失败：数据不连续")
        except Exception as e:
            SPEED_LOGGER.error(f"🖼 图像转换失败: {str(e)}", exc_info=True)

    def cleanup(self):
        """Release the capture device and stop the inference worker."""
        SPEED_LOGGER.info("🧹 清理 VideoThread 资源...")
        if self.cap and self.cap.isOpened():
            self.cap.release()
            self.cap = None
        self.inference_worker.stop()
        self.inference_worker.wait(2000)

    def stop(self):
        """Request the capture loop to exit and join the thread."""
        self.running = False
        self.quit()
        self.wait(2000)

# ==================== 主窗口 ====================
class MainWindow(QMainWindow):
    """Main window: one QLabel per camera, each fed by its own VideoThread."""

    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.init_ui()
        self.init_video_threads(rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval)

    def init_ui(self):
        """Build the window layout with one video label per RTSP stream."""
        self.setWindowTitle("非机动车未戴头盔监控实时检测画面")
        self.setGeometry(100, 100, 1150, 600)
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)
        self.video_labels = [QLabel(parent=central_widget) for _ in range(len(self.rtsp_urls))]
        for label in self.video_labels:
            label.setAlignment(Qt.AlignCenter)
            label.setMinimumSize(640, 480)
            layout.addWidget(label)

    def init_video_threads(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        """Start one VideoThread per URL and wire its frames to its label."""
        self.threads = []
        for idx, url in enumerate(rtsp_urls):
            thread = VideoThread(url, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval)
            # lbl=... binds the label at definition time (late-binding pitfall).
            thread.update_frame.connect(
                lambda img, lbl=self.video_labels[idx]: self.update_label(lbl, img)
            )
            thread.start()
            self.threads.append(thread)

    def update_label(self, label, qt_img):
        """Scale the incoming QImage to the label and display it."""
        if not qt_img.isNull():
            scaled = qt_img.scaled(label.width(), label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
            label.setPixmap(QPixmap.fromImage(scaled))
        else:
            SPEED_LOGGER.warning("尝试显示空图像")

    def closeEvent(self, event):
        """Stop all capture threads, then shut down async logging."""
        for t in self.threads:
            t.stop()
        # Wait for every thread to finish.
        for t in self.threads:
            t.wait(2000)

        # Fix: close the async log handlers exactly once, after ALL threads
        # are down. The original nested this loop inside the per-thread wait
        # loop, closing the handlers repeatedly (and too early).
        for handler in SPEED_LOGGER.handlers:
            if isinstance(handler, AsyncLogHandler):
                handler.close()
        event.accept()


# ==================== 启动入口 ====================
if __name__ == "__main__":
    # Qt application must exist before any widget is created.
    app = QApplication(sys.argv)
    config = load_config()
    # Ensure all configured save directories exist.
    # Fix: parents=True so nested paths (e.g. "out/raw") don't raise.
    for d in config["save_dirs"].values():
        Path(d).mkdir(parents=True, exist_ok=True)
    window = MainWindow(
        rtsp_urls=config["rtsp_urls"],
        model_path=config["model_path"],
        center_ratio=config["center_detection_ratio"],
        nested_classes=config["nested_classes"],
        class_names=config["classes_to_show"],
        image_size=config["image_size"],
        frame_interval=config["frame_interval"]
    )
    window.show()
    # exec_() blocks until the window closes; propagate its exit code.
    sys.exit(app.exec_())