import json
import os
import queue
import threading
import time

import cv2
import numpy as np

from driver.hdmi_show import HDMI_Display
from driver.kalman import kalman_filter
from driver.touch import TouchScreenHandler
from driver.uart import SerialComm
from driver.yolov8 import YOLOv8Detector
from driver.usb_img import find_specific_video_devices
from driver.hsv_find import *
from driver.orbbec_img import *
from driver.position_center import position_color_center
from driver.counts_line_follow import *


BTN_A = (50, 200, 200, 270)
BTN_B = (50, 350, 200, 420)
BTN_C = (50, 500, 200, 570)
BTN_A2 = (300, 200, 450, 270)
BTN_B2 = (300, 350, 450, 420)
BTN_C2 = (300, 500, 450, 570)
BTN_A3 = (550, 200, 700, 270)
BTN_B3 = (550, 350, 700, 420)
BTN_C3 = (550, 500, 700, 570)
stop_btn = (850, 500, 1000, 570)


def process_yolov8_detection(display_frame, yolov8_detector, kalman_filter_obj, task_yolov8_mode, detect_counts, data_to_send):
    """
    Run one frame through the YOLOv8 + Kalman tracking pipeline.

    Parameters:
        display_frame: input BGR image frame
        yolov8_detector: YOLOv8 detector (may be None; frame passes through untouched)
        kalman_filter_obj: Kalman filter object (may be None)
        task_yolov8_mode: multiprocessing.Value-like flag; the task only runs
            when .value is truthy
        detect_counts: dict of consecutive hit/miss counters, using the
            'yolov8_detected' / 'yolov8_missed' keys
        data_to_send: dict updated in place with position/velocity fields for
            the downstream controller ('yolov8_x/y/vx/vy')

    Returns:
        hdmi_display_frame: annotated frame
        yolov8_fps: running-average inference FPS (0.0 when the task is off)
    """
    # Lazily create the running FPS accumulators so the function also works
    # if the module-level initialization has not executed yet.
    if not hasattr(process_yolov8_detection, "inference_time_sum"):
        process_yolov8_detection.inference_time_sum = 0.0
        process_yolov8_detection.inference_frame_count = 0

    yolov8_fps = 0.0
    hdmi_display_frame = display_frame.copy()

    if task_yolov8_mode.value and yolov8_detector is not None:
        # 1. Time the inference call.
        t0 = time.perf_counter()
        hdmi_display_frame, detections = yolov8_detector.process_frame(display_frame)
        t1 = time.perf_counter()

        # 2. Session-wide running-average FPS.
        process_yolov8_detection.inference_time_sum += (t1 - t0)
        process_yolov8_detection.inference_frame_count += 1
        yolov8_fps = process_yolov8_detection.inference_frame_count / process_yolov8_detection.inference_time_sum

        # 3. Detections present this frame.
        if detections and hdmi_display_frame is not None:
            detect_counts['yolov8_detected'] += 1
            detect_counts['yolov8_missed'] = 0
            # A confirmed hit ends any "lost target" streak.
            if kalman_filter_obj is not None:
                kalman_filter_obj.lost_count = 0

            # Debounce: require 3 consecutive hits before reporting.
            if detect_counts['yolov8_detected'] >= 3:
                class_id, score, x1, y1, x2, y2 = detections[0]  # highest-confidence target
                cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
                point = (int(cx), int(cy))
                px, py = point

                if kalman_filter_obj is not None:
                    try:
                        prediction = kalman_filter_obj.update(point)
                        if prediction is not None:
                            px, py = int(prediction[0, 0]), int(prediction[1, 0])
                            yolov8_vx, yolov8_vy = kalman_filter_obj.get_velocity()
                        else:
                            yolov8_vx, yolov8_vy = 0, 0
                        data_to_send.update({'yolov8_x': px, 'yolov8_y': py, 'yolov8_vx': yolov8_vx, 'yolov8_vy': yolov8_vy})
                    except Exception as e:
                        print(f"[YOLOv8] Kalman 更新异常: {e}")
                        # Fall back to the raw detection centre on filter failure.
                        data_to_send.update({'yolov8_x': cx, 'yolov8_y': cy, 'yolov8_vx': 0, 'yolov8_vy': 0})
                else:
                    data_to_send.update({'yolov8_x': cx, 'yolov8_y': cy, 'yolov8_vx': 0, 'yolov8_vy': 0})

                # Overlay: tracked point, score and FPS.
                cv2.circle(hdmi_display_frame, (px, py), 4, (0, 0, 255), -1)
                cv2.putText(hdmi_display_frame, f"Score: {score:.2f}", (px - 10, py - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 140), 1)
                cv2.putText(hdmi_display_frame, f"FPS: {yolov8_fps:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 200), 2)
        else:
            # No detection this frame.
            detect_counts['yolov8_missed'] += 1

            if kalman_filter_obj is not None and detect_counts['yolov8_missed'] >= 2:
                detect_counts['yolov8_detected'] = 0
                kalman_filter_obj.lost_count = getattr(kalman_filter_obj, "lost_count", 0)
                kalman_filter_obj.max_lost_count = getattr(kalman_filter_obj, "max_lost_count", 30)

                if kalman_filter_obj.lost_count < kalman_filter_obj.max_lost_count:
                    # Bug fix: actually advance the lost counter; previously it
                    # was never incremented, so the "target lost" branch below
                    # was unreachable.
                    kalman_filter_obj.lost_count += 1
                    try:
                        prediction = kalman_filter_obj.update((-1, -1))
                        if prediction is not None:
                            px, py = int(prediction[0, 0]), int(prediction[1, 0])
                            yolov8_vx, yolov8_vy = kalman_filter_obj.get_velocity()
                            data_to_send.update({'yolov8_x': px, 'yolov8_y': py, 'yolov8_vx': yolov8_vx, 'yolov8_vy': yolov8_vy})
                            cv2.circle(hdmi_display_frame, (px, py), 4, (0, 0, 255), -1)
                            cv2.putText(hdmi_display_frame, f"yolov8_lose: {kalman_filter_obj.lost_count}", (px - 10, py - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 140), 1)
                        else:
                            data_to_send.update({'yolov8_x': -1, 'yolov8_y': -1, 'yolov8_vx': 0, 'yolov8_vy': 0})
                    except Exception as e:
                        print(f"[YOLOv8] Kalman 丢失预测异常: {e}")
                else:
                    # Give up: report the (-1, -1) sentinel to the controller.
                    data_to_send.update({'yolov8_x': -1, 'yolov8_y': -1, 'yolov8_vx': 0, 'yolov8_vy': 0})
                    cv2.putText(hdmi_display_frame, f"yolov8_lost", (1, 1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 200), 2)

    return hdmi_display_frame, yolov8_fps


def process_circle(points, kalman_filter_obj, detect_counts, color_name, color_bgr, data_to_send, hdmi_display_frame):
    """
    Track a single coloured circle centre, optionally smoothed by a Kalman filter.

    Parameters:
        points: (x, y) detected centre for this frame, or None if nothing was found
        kalman_filter_obj: Kalman filter object (None to use the raw detection)
        detect_counts: dict of consecutive-detection counters keyed by
            color_name (the key is created on first use)
        color_name: label used for the overlay text and the data_to_send keys
        color_bgr: BGR colour for the drawn marker
        data_to_send: dict updated in place with '<color>_x' / '<color>_y'
        hdmi_display_frame: frame to draw the overlay on (may be None)

    Returns:
        hdmi_display_frame: the (possibly annotated) frame
        (px, py): tracked centre, or the (-1, -1) sentinel when no usable
            position is available this frame
    """
    key_x = f"{color_name}_x"
    key_y = f"{color_name}_y"
    # (-1, -1) is the "no usable position" sentinel, matching the values
    # reported to the controller when the target is lost.  (Previously this
    # was initialised to (0, 0), so callers received a bogus valid-looking
    # coordinate on unconfirmed frames.)
    px, py = -1, -1
    label_color = (0, 255, 0)

    if color_name not in detect_counts:
        detect_counts[color_name] = 0  # first use of this colour: start counting

    if points is not None and hdmi_display_frame is not None:
        detect_counts[color_name] += 1
        # Debounce: require 3 consecutive detections before reporting.
        if detect_counts[color_name] >= 3:
            # Kalman branch
            if kalman_filter_obj is not None:
                # A confirmed detection ends any "lost target" streak.
                kalman_filter_obj.lost_count = 0
                try:
                    prediction = kalman_filter_obj.update(points)
                    if prediction is not None:
                        px = int(prediction[0, 0])
                        py = int(prediction[1, 0])
                        cv2.circle(hdmi_display_frame, (px, py), 5, color_bgr, -1)
                        cv2.putText(hdmi_display_frame, color_name, (px-20, py-20), cv2.FONT_HERSHEY_SIMPLEX, 1, label_color, 2)
                    else:
                        px, py = points
                    data_to_send.update({key_x: px, key_y: py})
                except Exception as e:
                    print(f"[{color_name}圆环图像处理] Kalman异常: {e}")
                    # Filter failure: fall back to the raw detection.
                    px, py = points
                    cv2.circle(hdmi_display_frame, (px, py), 5, color_bgr, -1)
                    cv2.putText(hdmi_display_frame, color_name, (px-20, py-20), cv2.FONT_HERSHEY_SIMPLEX, 1, label_color, 2)
                    data_to_send.update({key_x: px, key_y: py})
            else:
                # No filter: use the raw detection directly.
                px, py = points
                cv2.circle(hdmi_display_frame, (px, py), 5, color_bgr, -1)
                cv2.putText(hdmi_display_frame, color_name, (px-20, py-20), cv2.FONT_HERSHEY_SIMPLEX, 1, label_color, 2)
                data_to_send.update({key_x: px, key_y: py})
        else:
            print(f"[{color_name}] 检测到 {detect_counts[color_name]} 次，未确认")
    else:
        # Missed: reset the confirmation counter and fall back to prediction.
        detect_counts[color_name] = 0
        if kalman_filter_obj is not None and hdmi_display_frame is not None:
            if not hasattr(kalman_filter_obj, "lost_count"):
                kalman_filter_obj.lost_count = 0
            if not hasattr(kalman_filter_obj, "max_lost_count"):
                kalman_filter_obj.max_lost_count = 30

            try:
                if kalman_filter_obj.lost_count < kalman_filter_obj.max_lost_count:
                    # Bug fix: advance the lost counter; previously it was
                    # never incremented, so the give-up branch below was
                    # unreachable.
                    kalman_filter_obj.lost_count += 1
                    if points is None:
                        points = (-1, -1)
                    prediction = kalman_filter_obj.update(points)
                    if prediction is not None:
                        px = int(prediction[0, 0])
                        py = int(prediction[1, 0])
                        cv2.circle(hdmi_display_frame, (px, py), 5, color_bgr, -1)
                        cv2.putText(hdmi_display_frame, f"{color_name}:{kalman_filter_obj.lost_count}", (px-10, py-20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 140), 1, cv2.LINE_AA)
                        data_to_send.update({key_x: px, key_y: py})
                else:
                    # Lost for too long: report the sentinel and show a banner.
                    data_to_send.update({key_x: -1, key_y: -1})
                    cv2.putText(hdmi_display_frame, f"{color_name}_lost", (10, 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            except Exception as e:
                print(f"[{color_name}预测异常]: {e}")

    # px/py always hold ints (or the sentinel) here; the previous
    # "if px is not None" guard was a tautology.
    return hdmi_display_frame, (px, py)


# 初始化静态变量
process_yolov8_detection.inference_time_sum = 0.0
process_yolov8_detection.inference_frame_count = 0


def image_processing_process_task(
    run_flag,               # multiprocessing.Event: global run/stop flag
    frame_usb,              # multiprocessing.Value('b'): enable the USB camera path
    frame_gemini336l,       # multiprocessing.Value('b'): enable the stereo (Orbbec) camera path
    color_mode,       # multiprocessing.Value('i'): 0 none, 1 red, 2 blue, 3 green, 4 yellow
    hdmi_process_flag,      # multiprocessing.Value('b'): enable HDMI output
    task_yolov8_mode,       # multiprocessing.Value('b'): enable YOLOv8 detection
    task_find_circles_mode,  # multiprocessing.Value('b'): enable circle/contour detection
    frame_source,           # multiprocessing.Queue: incoming (name, frame) tuples from the camera process
    hdmi_source,     # multiprocessing.Queue: frames forwarded to the HDMI display process
    data_queue,            # multiprocessing.Queue: results sent to the downstream controller
    task = 0 # optional task selector, default 0: 0 no task, 1 HSV colour detection, 2 contour path following
):
    """
    Image-processing process task.

    Pulls frames (preferring the Orbbec stereo camera, falling back to the USB
    camera queue), runs the enabled detection tasks (YOLOv8 tracking, HSV
    colour-circle tracking, or contour path following depending on `task` and
    the mode flags), pushes result dicts to `data_queue` and annotated frames
    to `hdmi_source`.

    Parameters:
      - run_flag: global start/stop control flag (multiprocessing.Event)
      - frame_usb: USB camera enable flag
      - frame_gemini336l: stereo camera enable flag
      - color_mode: which colour to track in task 1 (1 red, 2 blue, 3 green, 4 yellow)
      - hdmi_process_flag: whether to forward annotated frames to the HDMI queue
      - task_yolov8_mode: whether to run YOLOv8 detection
      - task_find_circles_mode: whether to run circle/contour detection
      - frame_source: queue delivering camera frames
      - hdmi_source: queue delivering frames to the HDMI display process
      - data_queue: queue delivering result dicts to the downstream controller
      - task: task selector (see inline comment on the parameter above)
    """

    inference_time_sum = 0 # YOLOv8 inference-time accumulator (NOTE(review): appears unused; process_yolov8_detection keeps its own)
    inference_frame_count = 0 # YOLOv8 frame counter (NOTE(review): appears unused, see above)
    data_to_send = {} # payload dict sent to the downstream controller
    original_frame = None # current input frame

    yolov8_detected = 0 # YOLOv8 consecutive-hit counter (NOTE(review): superseded by detect_counts below)
    yolov8_missed = 0 # YOLOv8 consecutive-miss counter (NOTE(review): superseded by detect_counts below)
    position_center_count = 0 # frames required for the initial centre-position fix
    position_center_flag = 0 # initial centre-position fix done flag
    center_green = None # green centre-point coordinates

    
    step_gas = 15 # step threshold: advance to the next target when error exceeds this
    step_gas_cnt = 1 # number of steps reached so far
    counts_step = 0 # 0 = still building the contour path, 1 = following it
    counts_step_cnt = 0 # consecutive successful contour extractions

    # Per-target consecutive-detection counters.
    detect_counts = {
        "yolov8_detected": 0,"yolov8_missed": 0,
        "red": 0, 
        "blue": 0,
        "green": 0, 
        "yellow": 0 
    }

    # Exit immediately if the global flag is already cleared.
    if not run_flag.is_set():
        print("[图像处理进程] run_flag 未设置，退出。")
        return
    # --- Initialize the YOLOv8 detector ---
    yolov8_detector = None

    kalman_filter_obj_yolov8 = None
    kalman_filter_obj_red = None
    kalman_filter_obj_blue = None
    kalman_filter_obj_green = None
    kalman_filter_obj_yellow = None
    try:
        BASE_DIR = os.path.dirname(os.path.abspath(__file__))
        model_path = os.path.join(BASE_DIR, "..", "model", "yolov8n_lucky_640x480_nv12_modified.bin")
        model_path = os.path.abspath(model_path)
        yolov8_detector = YOLOv8Detector(
            model_path=model_path,
            classes_num=1,
            score_thres=0.5,
            nms_thres=0.7,
            reg=16
        )
        kalman_filter_obj_yolov8 = kalman_filter(q=1,r=0.1)
        # NOTE(review): kalman_filter_obj_red_follow is only assigned inside
        # this try; if YOLOv8Detector() raises first, the task==2 follow path
        # below would hit a NameError — confirm intended.
        kalman_filter_obj_red_follow = kalman_filter()
        kalman_filter_obj_red = kalman_filter()
        kalman_filter_obj_blue = kalman_filter()
        kalman_filter_obj_green = kalman_filter()
        kalman_filter_obj_yellow = kalman_filter()
        print("[图像处理进程] kalman_filter_obj 初始化成功。")
        print("[图像处理进程] YOLOv8Detector 初始化成功。")

    except Exception as e:
        print(f"[图像处理进程] 初始化 YOLOv8Detector 失败: {e}")
    try:
        # Start the Orbbec stereo camera (colour + depth streams).
        cam_gemini336l = OrbbecCamera(color=True, depth=True)
        cam_gemini336l.start()
        print("[双目摄像头进程] 启动成功。")
    except Exception as e:
        print(f"[双目摄像头进程] 启动失败: {e}")
        cam_gemini336l = None  # camera unavailable; fall back to USB frames

    if task == 2:
        # Contour path-following task: build the contour processor.
        # NOTE(review): if this init fails, counts_line stays None but the
        # task==2 branch below still calls counts_line.process_image — confirm.
        counts_line = None
        try:
          counts_line = ContourProcessor(length_threshold=30, straight_step=15, curve_step=5)
          print("[图像处理进程] 轮廓轨迹规划 初始化成功。")
        except Exception as e:
          print(f"[图像处理进程] 初始化 轮廓轨迹规划 失败: {e}")
          counts_line = None



    # Main loop
    while run_flag.is_set():
     
        # Prefer the stereo camera when it is enabled and available.
        if frame_gemini336l.value and cam_gemini336l is not None:
            try:
                original_frame, depth_img = cam_gemini336l.read_frames()
                original_frame = cv2.flip(original_frame, -1)  # rotate 180 degrees
                # print("[图像处理进程] 接收到双目摄像头帧")
            except Exception as e:
                print(f"[图像处理进程] 接收双目摄像头帧失败: {e}")
                original_frame = None
        # Otherwise pull a frame from the USB camera queue.
        elif frame_source is not None and frame_usb.value:
            try:
                # Non-blocking-ish fetch; queue items are (name, frame) tuples.
                _,original_frame = frame_source.get(timeout=0.001)
                # print("[图像处理进程] 接收到图像帧")
            except queue.Empty:
                # time.sleep(0.01)
                original_frame = None

        # NOTE(review): if both camera flags are off, original_frame keeps its
        # value from the previous iteration and is re-processed — confirm.
        if original_frame is None:
            # print("[图像处理进程] 图像帧为空")
            continue
    
        # Reject anything that is not a valid numpy image.
        if not isinstance(original_frame, np.ndarray):
            print("[图像处理进程] original_frame 不是有效的图像数据，跳过。")
            continue

        # Normalise the frame (optional ROI extraction, then resize).
        try:
            # roi_out = extract_white_roi_perspective(original_frame)
            roi_out = original_frame
            if roi_out is None:
                print("[警告] warped 图像为空，跳过处理")
                continue

            # If a (warped, extra) tuple was returned, keep only the image.
            if isinstance(roi_out, tuple) and len(roi_out) >= 1:
                roi_result = roi_out[0]
            else:
                roi_result = roi_out

            # Sanity-check the shape: must be a 3-channel BGR image.
            if not (isinstance(roi_result, np.ndarray) and roi_result.ndim == 3 and roi_result.shape[2] == 3):
                print("[警告] warped 图像不是 3 通道 BGR，跳过处理")
                continue

            original_frame = roi_result
            display_frame = cv2.resize(original_frame, (640, 480))

        except Exception as e:
            print(f"[图像处理进程] resize 失败: {e}, original_frame 类型: {type(original_frame)}")
            continue


        # Independent working copies for each consumer of this frame.
        hdmi_display_frame = display_frame.copy()

        find_circle_frame = display_frame.copy()

        position_frame = display_frame.copy()

        follow_frame = display_frame.copy()

        h, w = display_frame.shape[:2]



        # if position_center_count >= 30:
        #     position_center_flag = 1

        # if not position_center_flag:
        #     center_green = position_color_center(position_frame, 'green', model=1)
        #     if center_green is None:
        #         continue

        #     position_center_count += 1
        #     data_to_send.update({
        #         'center_green_x': int(center_green[0]),
        #         'center_green_y': int(center_green[1])
        #     })
        #     print(f"模型 1 质心法 检测到的中心点: {center_green}")
        #     continue


        # YOLOv8 detection and overlay drawing.
        if task_yolov8_mode.value and yolov8_detector is not None:

                hdmi_display_frame, yolov8_fps = process_yolov8_detection(
                display_frame=display_frame,
                yolov8_detector=yolov8_detector,
                kalman_filter_obj=kalman_filter_obj_yolov8,
                task_yolov8_mode=task_yolov8_mode,
                detect_counts=detect_counts,
                data_to_send=data_to_send
            )


        if task_find_circles_mode.value :
            if task == 1:
                # Task 1: HSV colour-circle tracking; one colour at a time,
                # selected via color_mode, each with its own tuned HSV params.
                if color_mode.value == 1:
                    # Red
                    _, _, points_red = hsv_find(find_circle_frame, target_color='red', min_area=300, max_area=10000, scale_factor=0.5, kernel_size=(5,5), open_iterations=2, blur_kernel=(5,5))
                    process_circle(points_red, kalman_filter_obj_red, detect_counts, "red", (0, 0, 255), data_to_send, hdmi_display_frame)
                elif color_mode.value == 2:
                    # Blue
                    _, _, points_blue = hsv_find(find_circle_frame, target_color='blue', min_area=300, max_area=1000, scale_factor= 1, kernel_size=(3,3), open_iterations=3, blur_kernel=(5,5))
                    process_circle(points_blue, kalman_filter_obj_blue, detect_counts,"blue", (255, 0, 0), data_to_send, hdmi_display_frame)
                elif color_mode.value == 3:
                    # Green
                    _, _, points_green = hsv_find(find_circle_frame, target_color='green', min_area=500, max_area=10000, scale_factor=0.5, kernel_size=(3,3), open_iterations=3, blur_kernel=(3,3))
                    process_circle(points_green, kalman_filter_obj_green, detect_counts,"green", (0, 255, 0), data_to_send, hdmi_display_frame)
                elif color_mode.value == 4:
                    # Yellow
                    _, _, points_yellow = hsv_find(find_circle_frame, target_color='yellow', min_area=100, max_area=50000, scale_factor=0.5, kernel_size=(5,5), open_iterations=2, blur_kernel=(5,5))
                    process_circle(points_yellow, kalman_filter_obj_yellow, detect_counts,"yellow", (0, 255, 255), data_to_send, hdmi_display_frame)

            elif(task==2):
                # Task 2, phase 0: repeatedly extract the contour path until it
                # is stable (20 consecutive successes), then switch to phase 1.
                if counts_step == 0:

                    gray = cv2.cvtColor(follow_frame, cv2.COLOR_BGR2GRAY)
                    _,bin_img = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY_INV)
                 
                    indexed_path_simplified = counts_line.process_image(
                    bin_img, 
                    offset_distance=5,
                    contour_type='inner', 
                    min_area=1000,
                    epsilon_factor=0.02, # approximation factor
                    offset_method = "morphological", # offset via the morphological method
                    subdivision_mode='simplified' # explicitly chosen mode
                    )
                    if indexed_path_simplified is not None:
                        counts_step_cnt += 1
                        hdmi_display_frame = counts_line.visualize(hdmi_display_frame)
                        print(f"轮廓检测进行中，当前计数: {counts_step_cnt}")
                        if counts_step_cnt > 20:
                            counts_step = 1
                            # NOTE(review): if PathFollower() raises here,
                            # follow_line stays unbound but phase 1 still uses
                            # it — confirm intended.
                            try:
                                follow_line = PathFollower(indexed_path_simplified,10)
                            except Exception as e:
                                print(f"[图像处理进程] 初始化 轮廓轨迹追踪初始化 失败: {e}")
                            print("轮廓检测结束，激光点检测开始")
                else:
                    # Phase 1: track the red laser dot and report its offset
                    # from the next path target point.
                    # NOTE(review): hsv_find is given original_frame here (not
                    # the 640x480 display_frame used elsewhere) — confirm the
                    # coordinate spaces are consistent.
                    _,_,current_red_center = hsv_find(original_frame, target_color='red', scale_factor=1.0, min_area=1000, max_area=10000)
                    hdmi_display_frame, current_red_center = process_circle(current_red_center, kalman_filter_obj_red_follow, detect_counts, "red", (0, 0, 255), data_to_send, hdmi_display_frame)
                    hdmi_display_frame = counts_line.visualize(hdmi_display_frame)
                    if current_red_center is not None:
                        index_targrt,points_target ,_ ,_ = follow_line.update(current_red_center)
                        if index_targrt is not None and points_target is not None:
                            # Error vector from the laser dot to the target point.
                            target_x, target_y = current_red_center[0]-points_target[0], current_red_center[1]-points_target[1]
                            data_to_send.update({
                                'follow_x': int(target_x),
                                'follow_y': int(target_y)
                            })
                        else :
                            # (-1, -1) sentinel: no target point available.
                            data_to_send.update({
                                'follow_x': -1,
                                'follow_y': -1
                            })
                            print("轮廓追踪失败，未找到目标点")
                    else:
                        data_to_send.update({
                            'follow_x': -1,
                            'follow_y': -1
                        })
                        print("轮廓追踪失败，未找到红色圆环")
                        
        # Forward the accumulated results to the controller (best-effort).
        if data_to_send and (task_yolov8_mode.value or task_find_circles_mode.value):
            try:
                data_queue.put(data_to_send, timeout=0.001)
            except queue.Full:
                print("[图像处理进程] data_queue 满了，跳过。")

        # Forward the annotated frame to the HDMI process (drop when full).
        if hdmi_process_flag.value:
            if not hdmi_source.full():
                # x, y, w, h = bbox
                # cv2.rectangle(hdmi_display_frame, (x, y), (x+w-1, y+h-1), (0,255,0), 2)
                if center_green is not None:
                    cv2.circle(hdmi_display_frame, (int(center_green[0]), int(center_green[1])), 5, (0, 255, 0), -1)
                    cv2.putText(hdmi_display_frame, "Center", (int(center_green[0])-20, int(center_green[1])-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                # Snapshot of the latest HDMI frame for debugging/inspection.
                cv2.imwrite("/home/sunrise/cricket-training-questions/model/hdmi.jpg", hdmi_display_frame)
                hdmi_source.put((hdmi_display_frame), timeout=0.001)
            else:
                pass    
   
    print("[图像处理进程] 退出。")


# 摄像头读取进程
# -----------------------------------------------------------------------------
def camera_reader_process(
    run_flag,             # type: ignore # event controlling process start/stop
    frame_flag,           # type: ignore # flag controlling whether frames are read
    frame_queue,       # type: ignore # queue the captured frames are sent out on
    camera_logical_name,    # logical name of the camera to read (e.g. 'cap_main', 'cap_aux')
    camera_settings # optional camera settings, e.g. {'width': 1280, 'height': 720, 'fps': 30}
):
    """
    Standalone process that reads frames from the given camera and ships them
    out through a queue, rediscovering and reopening the device on failure.

    Parameters:
    - run_flag: multiprocessing.Event, global run/stop flag.
    - frame_flag: multiprocessing.Value-like flag; frames are only captured
      while .value is truthy.
    - frame_queue: multiprocessing.Queue, receives (camera_logical_name, frame) tuples.
    - camera_logical_name: str, key looked up in the dict returned by
      find_specific_video_devices.
    - camera_settings: dict, optional, camera resolution and FPS settings,
      e.g. {'width': 1280, 'height': 720, 'fps': 30}.
    """

    print(f"[摄像头进程 {camera_logical_name}] 启动。")
    cap = None
    device_identifier = None # may be a device-path string or an integer index

    # Initial camera discovery.
    discovered_devices = find_specific_video_devices()
    if camera_logical_name in discovered_devices:
        device_identifier = discovered_devices[camera_logical_name]
        print(f"[摄像头进程 {camera_logical_name}] 找到设备标识: {device_identifier}")
    else:
        print(f"[摄像头进程 {camera_logical_name}] 启动时未找到设备。将尝试周期性查找。")

    retry_interval = 5  # seconds to wait before retrying after an open failure
    frames_read = 0
    
    while run_flag.is_set():

        if  frame_flag.value:

            if cap is None or not cap.isOpened():
                # No device identifier yet (or a previous attempt failed):
                # rediscover before trying to open.
                if device_identifier is None:
                    print(f"[摄像头进程 {camera_logical_name}] 尝试重新查找设备...")
                    discovered_devices = find_specific_video_devices()
                    if camera_logical_name in discovered_devices:
                        device_identifier = discovered_devices[camera_logical_name]
                        print(f"[摄像头进程 {camera_logical_name}] 重新找到设备标识: {device_identifier}")
                    else:
                        print(f"[摄像头进程 {camera_logical_name}] 仍未找到设备。将在 {retry_interval} 秒后重试。")
                        time.sleep(retry_interval)
                        continue # back to the top of the run_flag loop

                print(f"[摄像头进程 {camera_logical_name}] 尝试打开摄像头: {device_identifier}...")
        
                try:
            
                    # Identifier may be an integer-like string, a /dev/videoX
                    # path, or a plain integer index.
                    if isinstance(device_identifier, str) and not device_identifier.startswith('/dev/video'):
                
                        try:
                            cap_id_int = int(device_identifier)
                            cap = cv2.VideoCapture(cap_id_int, cv2.CAP_V4L2) # or cv2.CAP_ANY
                        except ValueError:
                            print(f"[摄像头进程 {camera_logical_name}] 无法将设备标识 '{device_identifier}' 转为整数。")
                            cap = None # make sure cap is None
                    elif isinstance(device_identifier, str): # a '/dev/videoX' path
                        cap = cv2.VideoCapture(device_identifier, cv2.CAP_V4L2) # explicitly use the V4L2 backend
                    else: # assume an integer index
                        cap = cv2.VideoCapture(device_identifier, cv2.CAP_V4L2)


                    if cap is None or not cap.isOpened(): # double-check the open succeeded
                        raise ConnectionError(f"无法使用标识符 {device_identifier} 打开摄像头。")

                    print(f"[摄像头进程 {camera_logical_name}] 摄像头 {device_identifier} 打开成功。")

                    # Apply camera settings, if any.
                    if camera_settings:

                        # cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)  # 1 表示手动曝光模式 3：表示自动曝光模式
                        # cap.set(cv2.CAP_PROP_EXPOSURE, 20)  #曝光值需实验调整 
                        # cap.set(cv2.CAP_PROP_GAIN, 0.1)     # 增益值需实验调整

                        # 指定输出格式：MJPG = Motion-JPEG
                        #cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
                        if 'width' in camera_settings:
                            cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_settings['width'])
                        if 'height' in camera_settings:
                            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_settings['height'])
                        if 'fps' in camera_settings:
                            cap.set(cv2.CAP_PROP_FPS, camera_settings['fps'])
                        
                        # Verify the settings actually took effect (optional).
                        actual_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
                        actual_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
                        actual_fps = cap.get(cv2.CAP_PROP_FPS)
                        print(f"[摄像头进程 {camera_logical_name}] 应用设置后 - 实际 Width: {actual_width}, Height: {actual_height}, FPS: {actual_fps}")


                except Exception as e: # including ConnectionError
                    print(f"[摄像头进程 {camera_logical_name}] 打开摄像头 {device_identifier} 失败: {e}")
                    if cap is not None:
                        cap.release()
                    cap = None
                    device_identifier = None # reset the identifier so we rediscover next time
                    print(f"[摄像头进程 {camera_logical_name}] 将在 {retry_interval} 秒后重试。")
                    time.sleep(retry_interval)
                    continue # back to the top of the run_flag loop

            # Camera is open: read one frame.
            ret, frame = cap.read()

            if ret:
                frames_read += 1
                try:
                    # Put (camera name, frame) on the queue; the short timeout
                    # avoids blocking forever when the queue is full so the
                    # process can still respond to run_flag.
                    #frame = cv2.resize(frame, (640, 480),interpolation=cv2.INTER_AREA )
                    # h, w = frame.shape[:2]
                    # roi = frame[0:h, 100:460]
                    # frame = roi  
                    frame_queue.put((camera_logical_name,frame), timeout=0.02)
                except queue.Full:
                    # Queue full: drop this frame silently.
                    # time.sleep(0.01)
                    # print(f"[摄像头进程 {camera_logical_name}] 警告: 帧队列已满，丢弃一帧。")
                    continue 
                except Exception as e:
                    print(f"[摄像头进程 {camera_logical_name}] 发送帧到队列时出错: {e}")
        
            else:
                print(f"[摄像头进程 {camera_logical_name}] 无法从摄像头读取帧。可能已断开连接。")
                cap.release()
                cap = None # triggers the reopen logic on the next iteration
                device_identifier = None # reset the identifier
                time.sleep(1) # short wait before attempting to reconnect
    # Cleanup.
    if cap is not None:
        cap.release()
    print(f"[摄像头进程 {camera_logical_name}] 正在停止。")
    cv2.destroyAllWindows() # this process shows no windows, but just in case


def serial_worker(run_flag, mode_flag, port_name, baud_rate, write_q, read_q, retry_interval=5):
    """
    Manage one serial port: open it, run a reader and a writer thread, and
    reconnect after failures.

    - mode_flag only controls whether the writer thread sends
    - the reader thread is governed only by run_flag and stop_threads

    Parameters:
    - run_flag: multiprocessing.Event, global run/stop flag
    - mode_flag: Value-like flag gating the writer thread
    - port_name, baud_rate: serial port device name and baud rate
    - write_q: queue of outgoing packets (dict/str; dicts are JSON-encoded)
    - read_q: queue receiving incoming data
    - retry_interval: seconds between reconnect attempts
    """
    while run_flag.is_set():

        if not mode_flag.value:
            # Writing is disabled: sleep briefly, but still proceed to open
            # the port so the reader thread can run.
            # NOTE(review): there is no `continue` here, so the port is opened
            # regardless of mode_flag — confirm this is intended.
            time.sleep(0.1)
        # Try to open the serial port.
        try:
            ser = SerialComm(port_name, baud_rate, timeout=0.01)
            print(f"[{port_name}] 初始化成功。")
        except Exception as e:
            print(f"[{port_name}] 打开失败: {e}，{retry_interval}s 后重试")
            time.sleep(retry_interval)
            continue

        stop_threads = threading.Event()
        # Shared error counter: either thread hitting max_exceptions stops both.
        exception_counter = 0
        max_exceptions = 5

        def reader():
            # Continuously drain the port into read_q.
            nonlocal exception_counter
            print(f"[{port_name}] 读线程启动。")
            # The reader ignores mode_flag: it runs as long as
            # run_flag is set and stop_threads is clear.
            while run_flag.is_set() and not stop_threads.is_set():
                try:
                    data = ser.receive()
                    if data:
                        read_q.put(data)
                    else:
                        time.sleep(0.001)
                except Exception as e:
                    exception_counter += 1
                    print(f"[{port_name} 读] 错误({exception_counter}): {e}")
                    time.sleep(0.5)
                    if exception_counter >= max_exceptions:
                        stop_threads.set()
            print(f"[{port_name}] 读线程停止。")

        def writer():
            # Drain write_q onto the port while mode_flag is on.
            nonlocal exception_counter
            print(f"[{port_name}] 写线程启动。")
            while run_flag.is_set() and not stop_threads.is_set():
                # Only send while mode_flag is on.
                if mode_flag.value:
                    try:
                        pkt = write_q.get(block=False)
                        # Normalise the packet to a string (dicts become JSON).
                        if isinstance(pkt, dict):
                            pkt = json.dumps(pkt)
                        elif not isinstance(pkt, str):
                            pkt = str(pkt)
                        # ser.send(pkt,hex_mode=True)
                        ser.send(pkt)
                    except queue.Empty:
                        time.sleep(0.001)
                    except Exception as e:
                        exception_counter += 1
                        print(f"[{port_name} 写] 错误({exception_counter}): {e}")
                        time.sleep(0.5)
                        if exception_counter >= max_exceptions:
                            stop_threads.set()
                else:
                    # Writing disabled: back off and retry later.
                    time.sleep(0.1)
            print(f"[{port_name}] 写线程停止。")

        # Start both threads.
        t_r = threading.Thread(target=reader, daemon=True)
        t_w = threading.Thread(target=writer, daemon=True)
        t_r.start()
        t_w.start()

        # Supervisor loop: wait until a thread fails or the global flag clears.
        while run_flag.is_set() and not stop_threads.is_set():
            time.sleep(0.1)

        # Ask both threads to exit, then wait for them.
        stop_threads.set()
        t_r.join()
        t_w.join()

        # Close the serial port.
        try:
            ser.close()
            print(f"[{port_name}] 串口已关闭。")
        except Exception as e:
            print(f"[{port_name}] 关闭失败: {e}")

        # Do not reconnect once the global flag has been cleared.
        if not run_flag.is_set():
            break

        print(f"[{port_name}] 断连，{retry_interval}s 后重连…")
        time.sleep(retry_interval)
    print(f"[{port_name}] Worker 退出。")

# --- 进程函数 ---
def serial_process_task(run_flag, ports_config):
    """
    串口管理进程：一个进程内并行多路串口。
    - run_flag: multiprocessing.Event,全局开/关
    - ports_config: list of tuples:
        (mode_flag, port_name, baud_rate, write_q, read_q)
    """
    print("[串口管理进程] 启动，共管理 %d 路串口。" % len(ports_config))
    threads = []
    # 为每路串口启动一个 worker 线程
    for mode_flag, port, baud, wq, rq in ports_config:
        th = threading.Thread(
            target=serial_worker,
            args=(run_flag, mode_flag, port, baud, wq, rq , 5),
            daemon=True
        )
        th.start()
        threads.append(th)

    # 等待全局停止
    while run_flag.is_set():
        time.sleep(0.1)

    print("[串口管理进程] 收到停止信号，等待线程退出…")
    for th in threads:
        th.join()
    print("[串口管理进程] 已退出。")



def overlay_image_alpha(bg, fg, x, y):
    """
    Alpha-blend a 4-channel BGRA foreground onto a 3-channel BGR background.

    Args:
        bg: background image, HxWx3 uint8; the pasted region is modified
            in place.
        fg: foreground image, hxwx4 uint8 with the alpha channel at index 3.
        x, y: top-left corner (column, row) where fg is pasted. Assumed
            non-negative — TODO confirm callers never pass negative offsets.

    Returns:
        bg, with the overlapping region blended in.
    """
    h, w = fg.shape[:2]

    # Clip the overlay to the background so out-of-bounds pastes can't
    # crash. Unconditional min() is equivalent to the old overflow check
    # when the overlay fits entirely inside bg.
    h = min(h, bg.shape[0] - y)
    w = min(w, bg.shape[1] - x)
    if h <= 0 or w <= 0:
        # BUGFIX: a fully off-screen paste used to produce an empty array
        # and crash in cv2.split; now it is a no-op.
        return bg
    fg = fg[:h, :w]

    # Only the alpha channel is needed, so slice it directly instead of
    # splitting all four channels. Shape (h, w, 1) broadcasts over BGR.
    alpha = fg[:, :, 3:4] / 255.0

    roi = bg[y:y + h, x:x + w]
    blended = alpha * fg[:, :, :3] + (1 - alpha) * roi
    bg[y:y + h, x:x + w] = blended.astype(np.uint8)
    return bg

def _draw_stop_button(frame):
    """Draw the Stop button onto *frame* in place, scaled to the frame size.

    Button geometry is authored for a 1024x600 base resolution and scaled
    proportionally to the actual frame.
    """
    h, w = frame.shape[:2]
    base_w, base_h = 1024, 600
    scale_x = w / base_w
    scale_y = h / base_h
    # Scaled copy of the module-level stop_btn rectangle (850,500,1000,570);
    # kept local so the module constant is not shadowed.
    btn = (
        int(850 * scale_x),
        int(500 * scale_y),
        int(1000 * scale_x),
        int(570 * scale_y),
    )
    font_scale = 1 * ((scale_x + scale_y) / 2)  # average of both axes
    cv2.rectangle(frame, btn[:2], btn[2:], (200, 200, 200), 1)
    cv2.putText(
        frame, " Stop ",
        (btn[0] + int(10 * scale_x), btn[1] + int(40 * scale_y)),
        cv2.FONT_HERSHEY_SIMPLEX, font_scale, (0, 0, 255), 2)


def _build_menu_image():
    """Build the idle menu screen: logo strip on top, 3x3 task-button grid.

    Returns a 600x1024 BGR uint8 image.
    """
    img = 255 * np.ones((600, 1024, 3), dtype=np.uint8)

    overlay = cv2.imread(
        "/home/sunrise/cricket-training-questions/images/1.png",
        cv2.IMREAD_UNCHANGED)
    # BUGFIX: cv2.imread returns None when the file is missing; the old
    # code then crashed in cv2.resize. Skip the logos instead.
    if overlay is not None:
        overlay_resized = cv2.resize(overlay, (150, 120),
                                     interpolation=cv2.INTER_AREA)
        for logo_x in (50, 300, 550):
            img = overlay_image_alpha(img, overlay_resized, x=logo_x, y=50)

    # (rect, label, label x-offset) for every menu button.
    buttons = (
        (BTN_A, "  Start ", 6),
        (BTN_B, "  Reset ", 6),
        (BTN_C, "  ESC ", 13),
        (BTN_A2, "  task1 ", 6),
        (BTN_B2, "  task2 ", 6),
        (BTN_C2, "  task3 ", 6),
        (BTN_A3, "  task4 ", 6),
        (BTN_B3, "  task5 ", 6),
        (BTN_C3, "  task6 ", 6),
    )
    for btn, label, x_off in buttons:
        cv2.rectangle(img, btn[:2], btn[2:], (200, 200, 200), -1)
        cv2.putText(img, label, (btn[0] + x_off, btn[1] + 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
    return img


def hdmi_process_task(run_flag, hdmi_process_flag, hdmi_queue):
    """
    HDMI display process task.

    Args:
        run_flag: multiprocessing.Event, global start/stop flag.
        hdmi_process_flag: multiprocessing.Value('b'); when true, frames
            from ``hdmi_queue`` are shown with a Stop button overlay,
            otherwise a static menu screen is displayed.
        hdmi_queue: multiprocessing.Queue delivering frames to display.
    """
    if not run_flag.is_set():
        print("[HDMI 显示进程] run_flag 未设置，退出。")
        return

    try:
        hdmi_display_img = HDMI_Display()
        print("[HDMI 显示进程] HDMI_Display 初始化成功。")
    except Exception as e:
        print(f"[HDMI 显示进程] HDMI_Display 初始化失败: {e}")
        return

    # Cached idle-menu screen; built lazily on first use. The old code
    # re-read the PNG from disk and redrew every button on every loop
    # iteration while idle.
    menu_img = None

    try:
        while run_flag.is_set():
            if hdmi_process_flag.value:
                try:
                    original_frame = hdmi_queue.get(timeout=0.01)
                except queue.Empty:
                    time.sleep(0.001)
                    continue
                _draw_stop_button(original_frame)
                if original_frame is not None:
                    ret = hdmi_display_img.display_frame(original_frame)
                    if ret != 0:
                        print(f"[HDMI 显示进程] display_frame 返回: {ret}")
            else:
                if menu_img is None:
                    menu_img = _build_menu_image()
                ret = hdmi_display_img.display_frame(menu_img)
                if ret != 0:
                    print(f"[HDMI 显示进程] display_frame 返回: {ret}")
    except Exception as e:
        print(f"[HDMI 显示进程] 错误: {e}")
    finally:
        # BUGFIX: the display was only closed on exception; release it on
        # normal shutdown too.
        try:
            hdmi_display_img.close()
        except Exception:
            pass
        print("[HDMI 显示进程] 已退出。")



# Touch hotspot rectangles (x1, y1, x2, y2) on the 1024x600 screen.
# Index 0 is the Stop button; indices 1-9 are the menu-grid buttons,
# matching the BTN_* constants declared at the top of the file.
_TOUCH_BUTTON_RECTS = (
    (850, 500, 1000, 570),
    (50, 200, 200, 270),
    (50, 350, 200, 420),
    (50, 500, 200, 570),
    (300, 200, 450, 270),
    (300, 350, 450, 420),
    (300, 500, 450, 570),
    (550, 200, 700, 270),
    (550, 350, 700, 420),
    (550, 500, 700, 570),
)

TOUCH_REGION = {
    'regions': [{'BUTTON': rect} for rect in _TOUCH_BUTTON_RECTS],
}

def hdmi_touch_task(run_flag, touch_process_flag, touch_data_queue):
    """
    Touch-screen input process task.

    Args:
        run_flag: multiprocessing.Event, global start/stop flag.
        touch_process_flag: multiprocessing.Value('b'), whether touch
            listening is enabled.
        touch_data_queue: multiprocessing.Queue; the id (0-9) of each
            pressed button is forwarded to the main process through it.
    """
    if not run_flag.is_set():
        print("[TOUCH 触摸进程] run_flag 未设置，退出。")
        return

    try:
        hdmi_touch = TouchScreenHandler("wch.cn CH57x",
                                        screen_width=1024, screen_height=600)
        print("[TOUCH 触摸进程] TouchScreenHandler 初始化成功。")
    except Exception as e:
        print(f"[TOUCH 触摸进程] TouchScreenHandler 初始化失败: {e}")
        return

    try:
        button_regions = [r['BUTTON'] for r in TOUCH_REGION['regions']
                          if 'BUTTON' in r]
    except Exception as e:
        print(f"[TOUCH 触摸进程] 解析按钮区域失败: {e}")
        return

    try:
        # The callbacks carry no logic of their own — each just forwards its
        # button id to the main process. `i=i` binds the loop value early so
        # every lambda keeps its own index (late-binding pitfall).
        hdmi_touch.set_buttons(
            button_regions=button_regions,
            callbacks={
                i: (lambda i=i: touch_data_queue.put(i, timeout=0.01))
                for i in range(len(button_regions))
            },
        )
        # Main loop.
        while run_flag.is_set():
            if touch_process_flag.value:
                try:
                    hdmi_touch.listen()
                except Exception as e:
                    print(f"[TOUCH 触摸进程] 错误: {e}")
                    # Back off so a persistently failing device does not
                    # produce a tight error-print loop.
                    time.sleep(0.1)
            else:
                # BUGFIX: the old loop busy-waited at 100% CPU while
                # listening was disabled; sleep like the serial workers do.
                time.sleep(0.1)
    except Exception as e:
        print(f"[TOUCH 触摸进程] 错误: {e}")
        print("[TOUCH 触摸进程] 已退出。")

             
