import sys
sys.path.append('/home/sunrise/python_project1/code') 
import cv2
import time
import threading
import queue
import json
import numpy as np
from collections import OrderedDict  # ordered dict for deterministic key order in the web data payload
from uart import SerialComm
from web_show import ImageStreamer
from find_counts import LaserVisionProcessor
from yolov8_detect import YOLOv8Detector
from find_device import *


CONFIG_HSV = "/home/sunrise/python_project1/data/lab_config.json" 

# Camera reader process
# -----------------------------------------------------------------------------
def camera_reader_process(
    run_flag,             # multiprocessing.Event controlling process start/stop
    frame_queue,          # queue used to ship frames out
    camera_logical_name,  # logical name of the camera to read (e.g. 'cap_main', 'cap_aux')
    camera_settings       # optional settings, e.g. {'width': 1280, 'height': 720, 'fps': 30}
):
    """
    Standalone process that reads frames from the given camera and ships them
    through a queue, re-discovering and re-opening the device on any failure.

    Parameters:
    - run_flag: multiprocessing.Event, global run/stop flag.
    - frame_queue: multiprocessing.Queue receiving (camera_logical_name, frame) tuples.
    - camera_logical_name: str, key looked up in the dict returned by
      find_specific_video_devices().
    - camera_settings: dict, optional resolution/FPS to apply,
      e.g. {'width': 1280, 'height': 720, 'fps': 30}.
    """
    print(f"[摄像头进程 {camera_logical_name}] 启动。")
    cap = None
    device_identifier = None  # either a '/dev/videoX' path string or an integer index

    # Initial camera discovery.
    discovered_devices = find_specific_video_devices()
    if camera_logical_name in discovered_devices:
        device_identifier = discovered_devices[camera_logical_name]
        print(f"[摄像头进程 {camera_logical_name}] 找到设备标识: {device_identifier}")
    else:
        print(f"[摄像头进程 {camera_logical_name}] 启动时未找到设备。将尝试周期性查找。")

    retry_interval = 5  # seconds to wait before retrying a failed open

    while run_flag.is_set():
        if cap is None or not cap.isOpened():
            # No identifier yet (or the previous attempt failed): rediscover.
            if device_identifier is None:
                print(f"[摄像头进程 {camera_logical_name}] 尝试重新查找设备...")
                discovered_devices = find_specific_video_devices()
                if camera_logical_name in discovered_devices:
                    device_identifier = discovered_devices[camera_logical_name]
                    print(f"[摄像头进程 {camera_logical_name}] 重新找到设备标识: {device_identifier}")
                else:
                    print(f"[摄像头进程 {camera_logical_name}] 仍未找到设备。将在 {retry_interval} 秒后重试。")
                    time.sleep(retry_interval)
                    continue  # back to the top of the run_flag loop

            print(f"[摄像头进程 {camera_logical_name}] 尝试打开摄像头: {device_identifier}...")
            try:
                # device_identifier may be an int index or a device path string.
                if isinstance(device_identifier, str) and not device_identifier.startswith('/dev/video'):
                    # String that is not a device path: try to parse it as an
                    # integer index (e.g. "0", "1").
                    try:
                        cap_id_int = int(device_identifier)
                        cap = cv2.VideoCapture(cap_id_int, cv2.CAP_V4L2)  # or cv2.CAP_ANY
                    except ValueError:
                        print(f"[摄像头进程 {camera_logical_name}] 无法将设备标识 '{device_identifier}' 转为整数。")
                        cap = None  # make sure cap stays None
                elif isinstance(device_identifier, str):
                    # '/dev/videoX' path: open explicitly with the V4L2 backend.
                    cap = cv2.VideoCapture(device_identifier, cv2.CAP_V4L2)
                else:
                    # Assume an integer index.
                    cap = cv2.VideoCapture(device_identifier, cv2.CAP_V4L2)

                if cap is None or not cap.isOpened():  # double-check the open succeeded
                    raise ConnectionError(f"无法使用标识符 {device_identifier} 打开摄像头。")

                print(f"[摄像头进程 {camera_logical_name}] 摄像头 {device_identifier} 打开成功。")

                # Apply optional camera settings.
                if camera_settings:
                    if 'width' in camera_settings:
                        cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_settings['width'])
                    if 'height' in camera_settings:
                        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_settings['height'])
                    if 'fps' in camera_settings:
                        cap.set(cv2.CAP_PROP_FPS, camera_settings['fps'])

                    # Report what the driver actually accepted (may differ from
                    # the requested values).
                    actual_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                    actual_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                    actual_fps = cap.get(cv2.CAP_PROP_FPS)
                    print(f"[摄像头进程 {camera_logical_name}] 应用设置后 - 实际 Width: {actual_width}, Height: {actual_height}, FPS: {actual_fps}")

            except Exception as e:  # includes the ConnectionError raised above
                print(f"[摄像头进程 {camera_logical_name}] 打开摄像头 {device_identifier} 失败: {e}")
                if cap is not None:
                    cap.release()
                cap = None
                device_identifier = None  # force rediscovery on the next attempt
                print(f"[摄像头进程 {camera_logical_name}] 将在 {retry_interval} 秒后重试。")
                time.sleep(retry_interval)
                continue  # back to the top of the run_flag loop

        # Camera is open: grab one frame.
        ret, frame = cap.read()

        if ret:
            try:
                # BUG FIX: the original called put(block=False, timeout=0.01).
                # With block=False the timeout is ignored and queue.Full is
                # raised immediately; the stated intent (see original comment)
                # was a short blocking put so the process still responds to
                # run_flag without dropping every frame under momentary
                # back-pressure.
                frame_queue.put((camera_logical_name, frame), block=True, timeout=0.01)
            except queue.Full:
                print(f"[摄像头进程 {camera_logical_name}] 警告: 帧队列已满，丢弃一帧。")
            except Exception as e:
                print(f"[摄像头进程 {camera_logical_name}] 发送帧到队列时出错: {e}")
                # More elaborate handling may be needed if the queue is permanently broken.
        else:
            print(f"[摄像头进程 {camera_logical_name}] 无法从摄像头读取帧。可能已断开连接。")
            cap.release()
            cap = None  # triggers the reopen logic on the next iteration
            device_identifier = None  # force rediscovery as well
            time.sleep(1)  # brief pause before attempting reconnection

    # Cleanup on shutdown.
    if cap is not None:
        cap.release()
    print(f"[摄像头进程 {camera_logical_name}] 正在停止。")
    cv2.destroyAllWindows()  # this process never displays, but just in case



def serial_worker(run_flag, mode_flag, port_name, baud_rate, write_q, read_q, retry_interval = 5):
    """
    Read/write worker for a single serial port, with disconnect/reconnect logic.

    Opens the port, spawns one reader and one writer thread sharing the same
    SerialComm handle, and waits. If either thread accumulates too many
    errors, both are stopped, the port is closed and reopened after
    ``retry_interval`` seconds.

    Parameters:
    - run_flag: multiprocessing.Event, global start/stop.
    - mode_flag: multiprocessing.Value('b'), per-port enable switch.
    - port_name: serial device name.
    - baud_rate: baud rate.
    - write_q: multiprocessing.Queue, outgoing packets.
    - read_q: multiprocessing.Queue, incoming data.
    - retry_interval: reconnect interval (seconds).
    """
    while run_flag.is_set():
        if not mode_flag.value:
            # Port disabled: sleep briefly and re-check the flags.
            time.sleep(0.1)
            continue

        # Try to open the serial port.
        try:
            ser = SerialComm(port_name, baud_rate, timeout=0.01)
            print(f"[{port_name}] 初始化成功。")
        except Exception as e:
            print(f"[{port_name}] 打开失败: {e}，{retry_interval}s 后重试")
            time.sleep(retry_interval)
            continue

        # Start the reader/writer threads.
        stop_threads = threading.Event()
        # Error count shared (via nonlocal) by both threads; once it reaches
        # max_exceptions, stop_threads is set and the port is re-opened.
        exception_counter = 0
        max_exceptions = 5

        def reader():
            # Pulls data from the port and pushes it onto read_q.
            nonlocal exception_counter
            print(f"[{port_name}] 读线程启动。")
            while run_flag.is_set() and mode_flag.value and not stop_threads.is_set():
                try:
                    data = ser.receive()
                    if data:
                        read_q.put(data)
                    else:
                        time.sleep(0.01)
                except Exception as e:
                    exception_counter += 1
                    print(f"[{port_name} 读] 错误({exception_counter}): {e}")
                    time.sleep(0.5)
                    if exception_counter >= max_exceptions:
                        stop_threads.set()
            print(f"[{port_name}] 读线程停止。")

        def writer():
            # Drains write_q and sends each packet over the port.
            nonlocal exception_counter
            print(f"[{port_name}] 写线程启动。")
            while run_flag.is_set() and mode_flag.value and not stop_threads.is_set():
                try:
                    pkt = write_q.get(block=False)
                    ser.send(pkt)
                except queue.Empty:
                    time.sleep(0.01)
                except Exception as e:
                    exception_counter += 1
                    print(f"[{port_name} 写] 错误({exception_counter}): {e}")
                    time.sleep(0.5)
                    if exception_counter >= max_exceptions:
                        stop_threads.set()
            print(f"[{port_name}] 写线程停止。")

        # Launch and monitor the two threads.
        t_r = threading.Thread(target=reader, daemon=True)
        t_w = threading.Thread(target=writer, daemon=True)
        t_r.start()
        t_w.start()

        # Wait until either thread requests a stop, or a global/mode stop.
        while run_flag.is_set() and mode_flag.value and not stop_threads.is_set():
            time.sleep(0.1)

        # Ask both threads to exit and wait for them.
        stop_threads.set()
        t_r.join()
        t_w.join()

        # Close the serial port.
        try:
            ser.close()
            print(f"[{port_name}] 串口已关闭。")
        except Exception as e:
            print(f"[{port_name}] 关闭失败: {e}")

        # If the global flag or the mode flag was cleared, do not reconnect.
        if not (run_flag.is_set() and mode_flag.value):
            break

        # Sleep before reconnecting.
        print(f"[{port_name}] 断连，{retry_interval}s 后重连…")
        time.sleep(retry_interval)

    print(f"[{port_name}] Worker 退出。")


# --- Process functions ---
def serial_process_task(run_flag, ports_config):
    """
    Serial-port manager process: runs several serial ports in parallel
    inside a single process, one worker thread per port.

    Parameters:
    - run_flag: multiprocessing.Event, global on/off switch.
    - ports_config: list of tuples
        (mode_flag, port_name, baud_rate, write_q, read_q)
    """
    print("[串口管理进程] 启动，共管理 %d 路串口。" % len(ports_config))

    # Build one daemon worker thread per configured port.
    workers = [
        threading.Thread(
            target=serial_worker,
            args=(run_flag, flag, name, baud, out_q, in_q, 5),
            daemon=True,
        )
        for flag, name, baud, out_q, in_q in ports_config
    ]
    for worker in workers:
        worker.start()

    # Block here until the global stop flag is cleared.
    while run_flag.is_set():
        time.sleep(0.1)

    print("[串口管理进程] 收到停止信号，等待线程退出…")
    for worker in workers:
        worker.join()
    print("[串口管理进程] 已退出。")



def image_processing_process_task(
    run_flag,               # multiprocessing.Event, global start/stop
    web_display_flag,       # multiprocessing.Value('b'), whether to stream to the web UI
    task_yolov8_mode,       # multiprocessing.Value('b'), whether YOLOv8 detection is enabled
    task_find_counts_mode,  # multiprocessing.Value('b'), whether LaserVisionProcessor is enabled
    frame_source,           # multiprocessing.Queue, incoming camera frames
    image_to_web_queue,      # multiprocessing.Queue, outgoing (frame, data) for the front end
    data_queue              # multiprocessing.Queue, outgoing results for the lower-level controller
):
    """
    Image-processing process task.

    Consumes (camera_name, frame) tuples from frame_source, optionally runs
    YOLOv8 detection and/or a two-phase contour/laser pipeline, publishes an
    annotated combined frame plus stats to image_to_web_queue, and pushes the
    laser-to-target offsets to data_queue.

    Parameters:
      - run_flag: global start/stop control flag
      - web_display_flag: whether web display is enabled
      - task_yolov8_mode: whether to run YOLOv8 detection
      - task_find_counts_mode: whether to run ring/counts detection
      - frame_source: queue carrying frames from the camera process
      - image_to_web_queue: queue carrying image + data to the front end
      - data_queue: queue carrying data to the lower-level controller
    """
    
    # Exit immediately if globally stopped or if no detection mode is enabled.
    if not run_flag.is_set():
        print("[图像处理进程] run_flag 未设置，退出。")
        return
    if not (task_yolov8_mode.value or task_find_counts_mode.value):
        print("[图像处理进程] 未启用任何检测模式，退出。")
        return

    # --- Initialise the YOLOv8 detector ---
    detector = None
    if task_yolov8_mode.value:
        try:
            detector = YOLOv8Detector(
                model_path="/home/sunrise/python_project1/model/yolov8n_detect_bayese_224x224_nv12_modified.bin",
                classes_num=1,
                score_thres=0.5,
                nms_thres=0.7,
                reg=16
            )
            print("[图像处理进程] YOLOv8Detector 初始化成功。")
        except Exception as e:
            print(f"[图像处理进程] 初始化 YOLOv8Detector 失败: {e}")
            return

    # --- Initialise the contour/laser processor ---
    find_counts_processor = None
    if task_find_counts_mode.value:
        try:
            find_counts_processor = LaserVisionProcessor(
                width=640, height=480, fps=60
            )
            print("[图像处理进程] LaserVisionProcessor 初始化成功。")
        except Exception as e:
            print(f"[图像处理进程] 初始化 LaserVisionProcessor 失败: {e}")
            return

    overall_frame_count = 0
    overall_start_time = time.time()
    inference_time_sum = 0.0
    inference_frame_count = 0
    counts_step = 0       # 0 = contour-detection phase, 1 = laser-tracking phase
    counts_step_cnt = 0   # number of successful contour detections so far

    step_gas = 15 # movement threshold (pixels): larger jumps count as "moved to next target"
    step_gas_cnt = 1 # index of the next target point to advance to

    frame_cnt = None
    last_cx , last_cy = 0,0
    current_cx, current_cy = 0, 0 # circle centre detected in the current frame
    center_init_flag = 0 # whether the starting target coordinate has been initialised

    result_point = None # detected target points
    order_points = []  # ordered list of target points
    offest_x = 0  # x detection offset (sent to the controller)
    offest_y = 0  # y detection offset (sent to the controller)
  
    h_center = 240
    w_center = 320

    # Load LAB colour thresholds from the JSON config file.
    with open(CONFIG_HSV) as f:
            color_data = json.load(f)
        
    # Extract the red thresholds (LAB channels, despite the "HSV" naming).
    red_lower = np.array([color_data['red']['l_lower'],color_data['red']['a_lower'],color_data['red']['b_lower']])
    red_upper = np.array([color_data['red']['l_upper'],color_data['red']['a_upper'],color_data['red']['b_upper']])

    green_lower = np.array([color_data['green']['l_lower'],color_data['green']['a_lower'],color_data['green']['b_lower']])
    green_upper = np.array([color_data['green']['l_upper'],color_data['green']['a_upper'],color_data['green']['b_upper']])
    # for color_name, thresholds in color_data.items():
    #     print(f"{color_name}:")
    #     print(f"Lower: {thresholds['lower']}")
    #     print(f"Upper: {thresholds['upper']}")
    #     print("------")

    # Main loop: one iteration per incoming frame.
    while run_flag.is_set():
        camera_name, original_frame = frame_source.get()
        if original_frame is None:
            time.sleep(0.01)
            continue

        # Normalise every incoming frame to a common working size.
        display_frame = cv2.resize(original_frame, (640, 480))
        h, w = display_frame.shape[:2]
     
        parts = []

        # YOLOv8 detection and overlay drawing.
        if task_yolov8_mode.value:
            t0 = time.perf_counter()
            frame_yolo, detections = detector.process_frame(display_frame.copy())
            t1 = time.perf_counter()

            inference_time_sum += (t1 - t0)
            inference_frame_count += 1
            yolov8_fps = inference_frame_count / inference_time_sum if inference_time_sum > 0 else 0.0
            cv2.putText( frame_yolo, f"YOLOv8 FPS: {yolov8_fps:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            # Detection boxes are already drawn by detector.process_frame.
            parts.append(frame_yolo)
        # YOLO disabled: show a cropped copy of the raw frame instead.
        else:

            display_frame_roi = display_frame.copy()[0:480,130:460]
            parts.append(display_frame_roi)

        # Ring/counts detection (two-phase state machine).
        if task_find_counts_mode.value:
        
            result_frame,result_point = None,None
            result = None
            hsv_bin = None
         
            # Phase 0: detect the target contour and collect ordered points.
            if counts_step == 0:
                display_frame_roi = display_frame.copy()[0:480,130:460]

                gray = cv2.cvtColor(display_frame_roi, cv2.COLOR_BGR2GRAY)
                _, bin_img = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY_INV)
                result = find_counts_processor.get_contours(bin_img, display_frame.copy())
                if result is not None:
                    result_frame, result_point = result # result_point is (M, 1, 2)
                    order_points = []
                    if result_frame is not None and result_point is not None and len(result_point) > 0: # also guard against empty point sets
                        frame_cnt = result_frame
                        counts_step_cnt += 1

                        for i, point_wrapper in enumerate(result_point):
                            actual_point_coords = tuple(point_wrapper[0]) # point_wrapper[0] is [x,y], tuple() converts it
                            order_points.append(actual_point_coords)
                            
                            cv2.circle(frame_cnt, actual_point_coords, 2, (0, 0, 255), -1)
                            
                            #cv2.putText(frame_cnt, f"index{i+1}:", (actual_point_coords[0]+10, actual_point_coords[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                        # To inspect (N,2)-shaped points, squeeze the middle axis first:
                        # print(f"Counts_detect: {result_point.squeeze(axis=1)}")
                        parts.append(frame_cnt)
                    else:
                        parts.append(display_frame.copy())
                else:
                    parts.append(display_frame.copy())

                # After enough successful detections, switch to laser tracking.
                if counts_step_cnt > 5:
                    counts_step = 1
                    print("轮廓检测结束，激光点检测开始")

            # Phase 1: detect the laser dot and steer it along order_points.
            else:

                display_frame_roi = display_frame.copy()[0:480,130:460]
                hsv_frame = cv2.cvtColor(display_frame_roi, cv2.COLOR_BGR2LAB)
         

                #hsv_bin = cv2.inRange(hsv_frame, green_lower, green_upper)
                hsv_bin = cv2.inRange(hsv_frame, red_lower, red_upper)
            
                # Clean up the mask: close small holes, remove specks, then thicken.
                hsv_bin = cv2.morphologyEx(hsv_bin, cv2.MORPH_CLOSE, np.ones((3,3), np.uint8))
                hsv_bin = cv2.morphologyEx(hsv_bin, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))
                hsv_bin = cv2.dilate(hsv_bin, np.ones((3,3), np.uint8), iterations=2)
                
                
                result = find_counts_processor.get_circle(hsv_bin, display_frame.copy())
                
                # Initialise the first target from the contour points, once.
                if len(order_points) > 0 and center_init_flag == 0:
                        
                    w_center = order_points[0][0]
                    h_center = order_points[0][1]
                    center_init_flag = 1
            

                if result is not None:
                    result_frame, cx_detected, cy_detected = result
                    frame_cnt_to_display = result_frame # use the detector's annotated frame as the base

                    if cx_detected is not None and cy_detected is not None:  # laser dot found
                        current_cx, current_cy = cx_detected, cy_detected

                        # 1. Compute this frame's offsets first (bug fix: moved before the condition checks).
                        # Offsets use a consistent sign convention.
                        current_offest_x = w_center - current_cx
                        current_offest_y = current_cy - h_center 

                        # 2. Decide whether to advance to the next target point.
                        # Condition 1: the laser moved significantly since the last frame.
                        moved_significantly = False
                        if last_cx is not None and last_cy is not None: # make sure last_cx/last_cy were set
                            if abs(current_cx - last_cx) > step_gas or abs(current_cy - last_cy) > step_gas:
                                moved_significantly = True
                        else: # first detection also counts as significant movement
                            moved_significantly = True

                        # Condition 2: the laser is very close to the current target
                        # (uses the offsets computed above).
                        arrival_threshold = 6 # tunable parameter
                        is_near_target = (abs(current_offest_x) < arrival_threshold and 
                                        abs(current_offest_y) < arrival_threshold)
                        #if moved_significantly and is_near_target:
                        if is_near_target:
                            if len(order_points) > 0 and step_gas_cnt < len(order_points):
                                # Advance to the next target in order_points.
                                w_center = order_points[step_gas_cnt][0]
                                h_center = order_points[step_gas_cnt][1]
                                step_gas_cnt += 1
                      
                                print(f"Advanced to target {step_gas_cnt}/{len(order_points)}: ({w_center}, {h_center})")
                            elif step_gas_cnt >= len(order_points) and len(order_points) > 0:
                                offest_x = 0
                                offest_y = 0
                                # step_gas_cnt = 1
                                print("All target points reached.")
                                # Optionally: stop, loop, or hold the last target.

                        # Record offsets for sending/logging (values sent to the controller).
                        offest_x = current_offest_x
                        offest_y = current_offest_y
                        #print(f"Offest: ({offest_x}, {offest_y})")

                        # Remember this frame's laser position.
                        last_cx = current_cx
                        last_cy = current_cy
                        # Draw the overlay.
                        cv2.putText(frame_cnt_to_display, "Circle_detect:", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                        cv2.circle(frame_cnt_to_display, (current_cx, current_cy), 8, (0, 0, 255), 2) # current laser dot

                    else: # no laser dot detected

                        frame_cnt_to_display = result_frame # still use the detector's frame
                        cv2.putText(frame_cnt_to_display, "No_Circle_detect:", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

                        if last_cx is not None and last_cy is not None: # draw the last known position
                            cv2.circle(frame_cnt_to_display, (last_cx, last_cy), 8, (0, 0, 255), 2)
                        
                        # Fall back to offsets based on the last known position.
                        if last_cx is not None:
                        #    offest_x = 0
                        #    offest_y = 0
                           offest_x = w_center - last_cx
                           offest_y = last_cy - h_center 
        
                else: 
                    # Detector returned nothing at all: show the raw frame.
                    frame_cnt_to_display = display_frame.copy() # use the original frame
                    cv2.putText(frame_cnt_to_display, "Detector_Error:", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    if last_cx is not None and last_cy is not None:
                        cv2.circle(frame_cnt_to_display, (last_cx, last_cy), 8, (0, 0, 255), 2)
                    offest_x, offest_y = 0, 0 # detector error: do not send a live offset

           
                # Mark the current target centre.
                cv2.circle(frame_cnt_to_display, (w_center, h_center), 3, (255, 255, 255), -1)

                # Draw the planned path through the ordered target points.
                if len(order_points) > 0:

                    contour = np.array(order_points, dtype=np.int32).reshape((-1, 1, 2))
                    cv2.drawContours(frame_cnt_to_display, [contour], -1, (0, 255, 0), 1)

                data_queue.put((offest_x, offest_y))
                parts.append(frame_cnt_to_display)
                                        
            
        else:
            parts.append(display_frame.copy())

        # Stitch all panels side by side for display.
        combined = np.hstack(parts)

        # Throughput statistics.
        overall_frame_count += 1
        elapsed = time.time() - overall_start_time
        overall_fps = overall_frame_count / elapsed if elapsed > 0 else 0.0

        data = OrderedDict([
            ("camera", camera_name),
            ("overall_fps", round(overall_fps, 2)),
            ("resolution", f"{w}x{h}"),
            ("x,y,x_distance,y_distance,x_cneter,y_center",f"x:{current_cx},y:{current_cy} x_gas:{offest_x},y_gas:{offest_y} x_center:{w_center},y_center:{h_center}")
        ])

        # Attach YOLO information when enabled.
        if task_yolov8_mode.value:
            data["yolo_fps"] = round(yolov8_fps, 2)
            # Format the raw detections for the web payload.
            formatted = []
            for cls_id, score, x1, y1, x2, y2 in detections:
                formatted.append({
                    "class_id": cls_id,
                    "score": round(float(score), 4),
                    "bbox": {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
                })
            data["yolo_detections"] = formatted

        # Ship the combined frame to the web display.
        if web_display_flag.value:
            try:
                # NOTE(review): block=False makes the timeout dead code —
                # put() raises queue.Full immediately; confirm whether a short
                # blocking put was intended here.
                image_to_web_queue.put((combined, dict(data)), block=False, timeout=0.01)
            except queue.Full:
                # Drop the oldest entry, then enqueue the new frame.
                try:
                    image_to_web_queue.get_nowait()
                    image_to_web_queue.put_nowait((combined, dict(data)))
                except Exception:
                    pass

    cv2.destroyAllWindows()
    print(f"[图像处理进程] 任务结束，共处理 {overall_frame_count} 帧。")


def web_display_process_task(run_flag, web_display_enable_flag, frame_data_q_in):
    """
    Web-display process task: pulls (frame, data) pairs off a queue and
    pushes them to an ImageStreamer, honouring an enable flag that supports
    disable/idle/restart cycles.
    """
    print("[网页显示进程] 启动。")
    streamer = ImageStreamer(port=5005, auto_open=True)

    has_started = False        # streamer.start() has succeeded at least once
    idle_notice_shown = False  # throttles the "display disabled" log message

    # Start immediately if display is enabled from the outset.
    if web_display_enable_flag.value:
        try:
            streamer.start()
            has_started = True
            print("[网页显示进程] ImageStreamer 启动成功。")
        except Exception as e:
            print(f"[网页显示进程] 启动失败: {e}")
            web_display_enable_flag.value = False

    while run_flag.is_set():

        if web_display_enable_flag.value:
            # Display enabled: forward the next frame/data pair.
            try:
                frame, data_dict = frame_data_q_in.get(timeout=0.1)
                streamer.update_image_frame(frame)
                streamer.update_data(data_dict)
            except queue.Empty:
                time.sleep(0.01)  # nothing queued; avoid a busy loop
            except Exception as e:
                print(f"[网页显示进程] 处理帧/数据出错: {e}")
                time.sleep(0.1)
            continue

        # Display disabled: idle, logging the transition only once.
        if not idle_notice_shown:
            print("[网页显示进程] 网页显示已禁用，进入空闲模式。")
            idle_notice_shown = True
        time.sleep(1)

        # Was a restart requested while idling?
        if web_display_enable_flag.value:
            if has_started:
                # Stop the previous instance first to avoid leftovers.
                try:
                    streamer.stop()
                    print("[网页显示进程] 停止旧的 ImageStreamer 实例。")
                except Exception:
                    pass
            try:
                streamer.start()
                has_started = True
                idle_notice_shown = False
                print("[网页显示进程] ImageStreamer 重新启动成功。")
            except Exception as e:
                print(f"[网页显示进程] 重新启动失败: {e}")
                web_display_enable_flag.value = False

    # Cleanup before exiting.
    if has_started:
        try:
            streamer.stop()
            print("[网页显示进程] ImageStreamer 停止。")
        except Exception as e:
            print(f"[网页显示进程] 停止时出错: {e}")

    print("[网页显示进程] 已退出。")











