import socketio
import subprocess
import argparse
from collections import deque
from ultralytics import YOLO, RTDETR
import cv2

import numpy as np
import time
import uuid
import requests
import json
from threading import Thread
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor

# Command-line interface: video source plus detection tuning knobs.
parser = argparse.ArgumentParser(description='An example command line argument parser')

parser.add_argument('-i', '--input', help='source', required=True)
parser.add_argument('-t', '--task', help='task', default='detect')
parser.add_argument('-s', '--show', help='show window', action='store_true')
parser.add_argument('-c', '--conf', help='conf', type=float, default=0.3)
parser.add_argument('-iou', '--iou', help='iou', type=float, default=0.4)
parser.add_argument('-cls', '--cls', help='class filter', type=int, nargs='+', default=None)
parser.add_argument('-mid', '--mid', help='monitor id')

args = parser.parse_args()

# General object detector (run with tracking so boxes carry ids).
model1 = YOLO('yolo11n.pt')
# Labels: {0: 'face', 1: 'face_masked'}
model2 = YOLO('mask2.pt')

# Person detections with tracking ids (lazy generator; frames are pulled in the main loop).
results1 = model1.track(
    source=args.input,        # input source
    stream=True,             # stream results frame by frame
    show=args.show,           # display a results window
    vid_stride=5,            # run detection on every 5th frame of the video
    imgsz=[960, 1920],        # inference image size
    conf=args.conf,           # confidence threshold
    stream_buffer=False,    # do not buffer the stream
    save=False,             # do not save results
    iou=args.iou,           # IoU threshold
    half=True,              # use half precision
    classes=args.cls,         # class filter
)

# Face/mask detections (no tracking). NOTE(review): this opens args.input a second
# time, so two decoders run against the same source — confirm that is intended.
results2 = model2.predict(
    source=args.input,        # input source
    stream=True,             # stream results frame by frame
    show=args.show,           # display a results window
    vid_stride=5,            # run detection on every 5th frame of the video
    imgsz=[960, 1920],        # inference image size
    conf=args.conf,           # confidence threshold
    stream_buffer=False,    # do not buffer the stream
    save=False,             # do not save results
    iou=args.iou,           # IoU threshold
    half=True,              # use half precision
    classes=args.cls,         # class filter
)


# Monitor ID
mid = args.mid if args.mid else args.input.split("/")[-1]
# 启动状态
started = False
# matrix area
area = {}
# alarm config
alarm_config = {}


# "robo/mask-wearing-iskms/6/weights.engine",   #口罩
# "robo/human-action-recognition-2000/4/weights.engine",    #行为1
# 使用队列存储最近10帧的检测结果
# detection_queue = deque(maxlen=10)

# fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# out = cv2.VideoWriter("PARKING.mp4", fourcc, 30, (1920, 1080))


def draw_rounded_rectangle(image, top_left, bottom_right, color, radius, thickness):
    """Draw a rectangle with rounded corners of the given pixel radius on image.

    Two overlapping axis-aligned rectangles form the straight edges; four
    quarter-ellipses fill in the corners. Draws in place, returns None.
    """
    x1, y1 = top_left
    x2, y2 = bottom_right

    # Straight edges (horizontal band and vertical band).
    cv2.rectangle(image, (x1 + radius, y1), (x2 - radius, y2), color, thickness)
    cv2.rectangle(image, (x1, y1 + radius), (x2, y2 - radius), color, thickness)

    # Corner arcs: (center, start angle) for TL, TR, BL, BR.
    arcs = (
        ((x1 + radius, y1 + radius), 180),
        ((x2 - radius, y1 + radius), 270),
        ((x1 + radius, y2 - radius), 90),
        ((x2 - radius, y2 - radius), 0),
    )
    for center, start_angle in arcs:
        cv2.ellipse(image, center, (radius, radius), start_angle, 0, 90, color, thickness)


def rounded_rectangle_corners(img, top_left, bottom_right, radius=1, color=(255, 255, 255), thickness=1, line_type=cv2.LINE_AA):
    """Draw a rounded rectangle whose corner radius is a fraction of the box size.

    :param radius: fraction in [0, 1] of half the shorter side used as corner radius
    :param thickness: cv2 line thickness; negative fills the rectangle
    :return: img (drawn in place)
    """
    p1 = top_left
    p2 = (bottom_right[0], top_left[1])
    p3 = (bottom_right[0], bottom_right[1])
    p4 = (top_left[0], bottom_right[1])

    height = abs(bottom_right[1] - top_left[1])
    width = abs(top_left[0] - bottom_right[0])

    # Clamp the fractional radius into [0, 1] (the original only capped the upper bound,
    # so a negative radius produced a negative corner radius).
    radius = min(max(radius, 0), 1)

    corner_radius = int(radius * min(height / 2, width / 2))

    if thickness < 0:
        # Filled mode: cover the interior with three rectangles (center band + two
        # side columns) so only the corner arcs remain to be filled.
        top_left_main_rect = (int(p1[0] + corner_radius), int(p1[1]))
        bottom_right_main_rect = (int(p3[0] - corner_radius), int(p3[1]))

        top_left_rect_left = (p1[0], p1[1] + corner_radius)
        bottom_right_rect_left = (p4[0] + corner_radius, p4[1] - corner_radius)

        top_left_rect_right = (p2[0] - corner_radius, p2[1] + corner_radius)
        bottom_right_rect_right = (p3[0], p3[1] - corner_radius)

        all_rects = [
            [top_left_main_rect, bottom_right_main_rect],
            [top_left_rect_left, bottom_right_rect_left],
            [top_left_rect_right, bottom_right_rect_right]]

        # Plain loop: the original used a list comprehension purely for side effects.
        for rect in all_rects:
            cv2.rectangle(img, rect[0], rect[1], color, thickness)

    # Corner arcs: top-left, top-right, bottom-right, bottom-left.
    cv2.ellipse(img, (p1[0] + corner_radius, p1[1] + corner_radius), (corner_radius, corner_radius), 180.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p2[0] - corner_radius, p2[1] + corner_radius), (corner_radius, corner_radius), 270.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p3[0] - corner_radius, p3[1] - corner_radius), (corner_radius, corner_radius), 0.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p4[0] + corner_radius, p4[1] - corner_radius), (corner_radius, corner_radius), 90.0, 0, 90,
                color, thickness, line_type)

    return img


def draw_label(img, label, top_left, offset_y, bg_color=(50, 50, 50), font_color=(255, 255, 255)):
    """Render a text label in a translucent rounded box directly above top_left.

    offset_y is accepted for interface compatibility but not used.
    Draws in place on img.
    """
    font = cv2.FONT_HERSHEY_DUPLEX
    font_scale = 0.6
    font_thickness = 1
    margin = 5   # padding around the text
    radius = 5   # corner radius of the background box

    text_w, text_h = cv2.getTextSize(label, font, font_scale, font_thickness)[0]

    # Place the label flush against the top edge of the bounding box.
    x0 = top_left[0]
    y0 = top_left[1] - text_h - 2 * margin
    x1 = x0 + text_w + 2 * margin
    y1 = y0 + text_h + 2 * margin

    # Clamp so the label never spills outside the image.
    bg_tl = (max(0, x0), max(0, y0))
    bg_br = (min(img.shape[1], x1), min(img.shape[0], y1))

    # Translucent rounded background, blended onto the frame.
    overlay = img.copy()
    draw_rounded_rectangle(overlay, bg_tl, bg_br, bg_color, radius, -1)
    alpha = 0.7
    cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0, img)

    # Label text on top of the background.
    text_origin = (bg_tl[0] + margin, bg_tl[1] + text_h + margin)
    cv2.putText(img, label, text_origin, font, font_scale, font_color,
                font_thickness, lineType=cv2.LINE_AA)


# Human-readable alarm names sent to the alarm endpoint, keyed by feature code.
Alert = {
    'CROWD': 'Crowd Detection',
    'GEO': 'Geo Fencing',
    'FACIAL': 'Facial Detection',
    'MASK': 'Mask Detection',
    'IDLE_TIME': 'Idle Time',
    'MOTION': 'Motion Detection'
}
# BGR colors used to tint zone overlays (one per zone type, in iteration order).
colors = [(50, 150, 255), (80, 180, 255), (100, 200, 255), (120, 255, 255),
          (140, 255, 100), (160, 255, 150), (0, 0, 255), (180, 255, 200), (200, 255, 200)]

# Thread pool (currently unused)
# executor = ThreadPoolExecutor(max_workers=16)


def run_on4(data1, data2):
    """Legacy alarm trigger: GET the alarm endpoint with (alarm, mid) query params.

    NOTE(review): appears superseded by do_trigger_alert(); kept for compatibility.
    :param data1: alarm name
    :param data2: monitor id
    """
    url = 'http://192.168.1.24:8080/alarms/trigger'
    # Build the query parameters as a dict.
    params = {
        "alarm": data1,
        "mid": data2,
    }
    print('#####################alert ')
    try:
        # Bounded timeout so a dead alarm server cannot stall the caller forever.
        requests.get(url, params=params, timeout=5)
    except requests.RequestException as e:
        # Best-effort: log and continue; alarms must not crash the pipeline.
        print(f'alarm trigger failed: {e}')


def throttle(delay):
    """Decorator factory: let the wrapped function run at most once per `delay` seconds.

    Calls arriving sooner than `delay` seconds after the last executed call are
    dropped silently (the wrapper returns None for them); otherwise the wrapped
    function runs and its result is returned.
    """
    import functools  # local import keeps this decorator self-contained

    def decorator(func):
        last_executed_time = 0

        @functools.wraps(func)  # preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            nonlocal last_executed_time
            current_time = time.time()
            # Only execute when enough time has passed since the last run.
            if current_time - last_executed_time >= delay:
                result = func(*args, **kwargs)
                last_executed_time = current_time
                return result
            # Throttled: drop the call (implicitly returns None).
        return wrapper
    return decorator


@throttle(10)
def triggerAlertGeoFencing():
    # Fire the geo-fencing alarm; throttled to at most once every 10 s.
    do_trigger_alert(mid, Alert['GEO'])


@throttle(10)
def triggerAlertCrowd():
    # Fire the crowd alarm; throttled to at most once every 10 s.
    do_trigger_alert(mid, Alert['CROWD'])
    
    
@throttle(10)
def triggerAlertFacial():
    # Fire the facial-detection alarm; throttled to at most once every 10 s.
    do_trigger_alert(mid, Alert['FACIAL'])
    
    
@throttle(10)
def triggerAlertMask():
    # Fire the mask alarm (unmasked face detected); throttled to once every 10 s.
    do_trigger_alert(mid, Alert['MASK'])
    
    
@throttle(10)
def triggerAlertIdleTime():
    # Fire the idle-time alarm; throttled to at most once every 10 s.
    do_trigger_alert(mid, Alert['IDLE_TIME'])
    

@throttle(10)
def triggerAlertMotion():
    # Fire the motion alarm; throttled to at most once every 10 s.
    do_trigger_alert(mid, Alert['MOTION'])
    
    
def do_trigger_alert(mid, alarm):
    """GET the alarm server's trigger endpoint with the alarm name and monitor id.

    Best-effort: network failures are logged and swallowed so a dead alarm
    server never crashes or stalls the frame loop.
    """
    url = 'http://192.168.1.24:8080/alarms/trigger'
    data = {
        "alarm": alarm,
        "mid": mid,
    }
    try:
        # Bounded timeout: without it an unreachable server would hang the caller.
        requests.get(url, params=data, timeout=5)
    except requests.RequestException as e:
        print(f'##################### TRIGGER ALERT FAILED: {alarm} {mid}: {e}')
        return
    print(f'##################### TRIGGER ALERT: {alarm} {mid}')


# (An earlier variant used h264_qsv hardware encoding at 20 Mbps — see history.)
# Pipe annotated raw frames from stdin to ffmpeg, which republishes them as RTSP.
ffmpeg_command = ['ffmpeg',
                  '-y',  # overwrite output
                  '-f', 'rawvideo',  # raw frames on stdin
                  '-pix_fmt', 'bgr24',  # OpenCV's native pixel layout
                  '-s', f"{3840}x{1920}",  # input frame size — NOTE(review): must match frame.shape of the source; confirm the stream really is 3840x1920
                  '-r', str(30),  # frame rate
                  '-i', '-',  # read input from stdin
                  '-c:v', 'libx264',  # software x264 encoding
                  '-preset', 'ultrafast',  # fastest encoding preset
                  '-tune', 'zerolatency',  # minimize encoding latency
                  '-g', '5',  # keyframe interval
                  '-f', 'rtsp',  # output container/protocol: RTSP
                  f'rtsp://192.168.1.24:8554/{mid}_va'  # RTSP publish address
                  ]
# Scratch counter (not referenced elsewhere in this file)
personCount = {'count': 0, 'start_time': 0}
ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE)


def process_detections(frame, detections, color, mod):
    """Annotate frame with a rounded box and label per detection; return crops and boxes.

    :param detections: ultralytics Boxes for one frame
    :param mod: the model the detections came from (used for class names)
    :return: (list of cropped images, list of (x1, y1, x2, y2) tuples)
    """
    crops = []
    boxes = []
    for det in detections:
        x1, y1, x2, y2 = map(int, det.xyxy[0])
        crops.append(frame[y1:y2, x1:x2])
        boxes.append((x1, y1, x2, y2))

        # Class name and confidence for the label text.
        cls_id = int(det.cls.item())
        cls_name = mod.names[cls_id]
        conf = float(det.conf.item())

        # Tracking id (0 when the detector ran without tracking).
        tid = int(det.id.item()) if det.id is not None else 0
        label = f"{cls_name} {conf:.2f}"
        if tid != 0:
            label += f" ID:{tid}"

        rounded_rectangle_corners(frame, (x1, y1), (x2, y2), radius=0.5, color=color, thickness=2)
        draw_label(frame, label, (x1, y1), 0,  bg_color=(0, 255, 0), font_color=(0, 0, 0))
    return crops, boxes


# Per-person dwell-state scratch; the dwell logic that used it is disabled below.
personGeoObj = { 'current_time' : 0, 'entry_time' : 0, 'last_emit_time' : 0 }
def geoFencing(human_boxes, threshold):
    """Alert when any person's foot point falls inside a "geofencing" polygon.

    :param human_boxes: iterable of (x1, y1, x2, y2) pixel boxes
    :param threshold: dwell seconds before alarm — currently unused; the
        original dwell-time logic was disabled and alarms rely on the
        @throttle decorator on triggerAlertGeoFencing instead.
    """
    global area, personGeoObj
    if "geofencing" not in area or len(area["geofencing"]) == 0:
        return

    # Build polygon arrays once instead of once per detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["geofencing"]]
    # Bottom-center of each box approximates where the person stands.
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    for center in centers:
        for polygon in polygons:
            # >= 0 means the point is inside or on the polygon edge.
            if cv2.pointPolygonTest(polygon, center, False) >= 0:
                triggerAlertGeoFencing()


# threshold: trigger the alarm after N seconds of dwell
# track_id -> {'entry_time': ts} for people currently inside an idle-time zone
idleTimeObj = {}
def idleTimeDetection(detections, threshold):
    """Alert when a tracked person stays inside an "idleTime" zone over threshold seconds.

    :param detections: ultralytics Boxes with tracking ids (from model.track)
    :param threshold: dwell seconds before triggerAlertIdleTime() fires
        (the trigger itself is throttled)
    """
    global area
    if "idleTime" not in area or len(area["idleTime"]) == 0:
        return

    # Build polygon arrays once instead of once per person.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["idleTime"]]

    human_boxes = detections.cpu().xyxy.numpy()
    # Bottom-center of each box approximates where the person stands.
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    for i, center in enumerate(centers):
        # Untracked detections (no id yet) cannot accumulate dwell time — skip.
        track_id = int(detections[i].id.item()) if detections[i].id is not None else 0
        if track_id == 0:
            continue

        in_area = any(cv2.pointPolygonTest(polygon, center, False) >= 0
                      for polygon in polygons)

        if not in_area:
            # Person left the zone: drop their entry-time record.
            idleTimeObj.pop(track_id, None)
            continue

        # First frame inside the zone: record the entry time.
        if track_id not in idleTimeObj:
            idleTimeObj[track_id] = { 'entry_time': time.time() }
        # Dwell time exceeded the threshold -> raise the (throttled) alarm.
        stay_time = time.time() - idleTimeObj[track_id]["entry_time"]
        if stay_time > threshold:
            triggerAlertIdleTime()
        
            
# Rate-limit the people-count event to once per second
last_emit_time = 0  # timestamp of the last emitted count
emit_seconds = 1    # emit interval (seconds)
def peopleCounting(human_boxes):
    """Count people (see do_count_people) and push the count to the server,
    at most once every emit_seconds."""
    global last_emit_time
    now = time.time()

    count = do_count_people(human_boxes)

    # Skip the emit while still inside the rate-limit window.
    if now - last_emit_time < emit_seconds:
        return
    sio.emit('c_people_counting', {'count': count, 'mid': mid})
    last_emit_time = now  # remember when we last emitted
        
        
def crowdDetection(human_boxes, threshold):
    """Raise the (throttled) crowd alarm when the people count exceeds threshold."""
    if do_count_people(human_boxes) > threshold:
        triggerAlertCrowd()
    
        
def facialDetection(human_boxes):
    """Alert when a face is detected, optionally restricted to "facial" polygons.

    :param human_boxes: iterable of (x1, y1, x2, y2) face boxes from the face model
    """
    global area
    # No zone configured: any detection at all triggers the (throttled) alarm.
    if "facial" not in area or len(area["facial"]) == 0:
        if len(human_boxes) > 0:
            triggerAlertFacial()
        return

    # Zones configured: only faces whose bottom-center lies inside a polygon count.
    # Build polygon arrays once instead of once per detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["facial"]]
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    for center in centers:
        for polygon in polygons:
            # >= 0 means inside or on the polygon edge.
            if cv2.pointPolygonTest(polygon, center, False) >= 0:
                triggerAlertFacial()
     
                    
def maskDetection(detections):
    """Alert when an unmasked face is detected, optionally restricted to "mask" polygons.

    Labels: {0: 'face', 1: 'face_masked'} — class 0 means no mask.
    :param detections: ultralytics Boxes from the mask model
    """
    global area
    # No zone configured: scan every detection frame-wide.
    if "mask" not in area or len(area["mask"]) == 0:
        for detection in detections:
            if int(detection.cls.item()) == 0:
                triggerAlertMask()
        return

    # Zones configured: only faces inside a "mask" polygon count.
    # Build polygon arrays once instead of once per detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["mask"]]
    human_boxes = detections.cpu().xyxy.numpy()
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    for i, center in enumerate(centers):
        for polygon in polygons:
            if cv2.pointPolygonTest(polygon, center, False) >= 0:
                # BUG FIX: the original indexed `detections[idx]` with the polygon
                # index, not the detection index, so it checked the wrong box's class.
                if int(detections[i].cls.item()) == 0:
                    triggerAlertMask()


previous_frame = None  # reference frame for frame differencing
last_update_time = time.time()  # when the reference frame was last refreshed
def motionDetection(frame, sensitivity, interval=2):
    """
    Motion detection (checks whether there is movement in the target area).
    :param frame: current frame
    :param sensitivity: binarization threshold (lower value = more sensitive)
    :param interval: reference-frame refresh interval (seconds)
    :return: True if motion was detected (and the throttled alarm fired), else False
    """
    global previous_frame, last_update_time, area

    # Convert to grayscale and denoise
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # Refresh the reference frame periodically
    if previous_frame is None or (time.time() - last_update_time) > interval:
        previous_frame = gray
        last_update_time = time.time()
        return False  # reference frame just updated; skip detection this round

    # Frame difference against the reference, binarized by sensitivity
    frame_delta = cv2.absdiff(previous_frame, gray)
    _, thresh = cv2.threshold(frame_delta, sensitivity, 255, cv2.THRESH_BINARY)
    thresh = cv2.dilate(thresh, None, iterations=2)

    # Contours of the changed regions
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Derive the motion-area threshold from the sensitivity automatically
    motion_threshold = max(300, 50 * (50 - sensitivity))  # lower sensitivity value -> larger area threshold

    # 1) Whole-frame detection (no "motion" zones configured)
    if "motion" not in area or len(area["motion"]) == 0:
        print("###### motion:", sum(cv2.contourArea(c) for c in contours), motion_threshold)
        if sum(cv2.contourArea(c) for c in contours) > motion_threshold:
            triggerAlertMotion()
            return True
        return False

    # 2) Zone-restricted detection
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["motion"]]
    for contour in contours:
        # Bounding rectangle of the moving region
        x, y, w, h = cv2.boundingRect(contour)
        motion_box = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=np.int32)

        # Check for overlap with any configured zone
        for polygon in polygons:
            # NOTE(review): cv2.intersectConvexConvex assumes convex inputs — confirm
            # user-drawn zones are convex, otherwise the intersection area is wrong.
            intersection_area = cv2.intersectConvexConvex(motion_box, polygon)[0]
            print("###### motion:", intersection_area, motion_threshold)
            if intersection_area > motion_threshold:
                triggerAlertMotion()
                return True
    return False


def do_count_people(human_boxes):
    """Count people, frame-wide or restricted to "peopleCounting" polygons.

    :param human_boxes: iterable of (x1, y1, x2, y2) pixel boxes
    :return: number of people counted
    NOTE(review): a person inside two overlapping polygons is counted once per
    polygon — confirm whether zones are meant to be disjoint.
    """
    # No zone configured: every detection counts.
    if "peopleCounting" not in area or len(area["peopleCounting"]) == 0:
        return len(human_boxes)

    # Build polygon arrays once instead of once per detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["peopleCounting"]]
    # Bottom-center of each box approximates where the person stands.
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    count = 0
    for center in centers:
        for polygon in polygons:
            if cv2.pointPolygonTest(polygon, center, False) >= 0:
                count += 1
    return count


# Socket.IO client over a requests session with TLS verification disabled.
# NOTE(review): verify=False accepts any certificate — acceptable only on a trusted LAN.
requests.packages.urllib3.disable_warnings()
http_session = requests.Session()
http_session.verify = False
sio = socketio.Client(http_session=http_session)

@sio.event
def connect():
    # On connect: announce this analyzer and ask for its settings
    # (presumably answered via the 'c_update_settings' event — verify server-side).
    print('connected')
    sio.emit('c_va_status', {'status': 'connected', 'mid': mid})
    sio.emit('c_va_init', {'mid': mid})

@sio.event
def connect_error(err):
    # Log connection failures; socketio handles reconnection itself.
    print(err)

# include alarm and matrix
# Settings push: carries both zone polygons ('matrix') and alarm config ('alarm').
@sio.on('c_update_settings')
def on_c_update_alarm(data):
    global area, alarm_config
    print('Received c_update_settings', data)
    # Ignore settings addressed to other monitors.
    if data['mid'] != mid:
        return
    if 'matrix' in data['settings']:
        area = data["settings"]["matrix"]
    if 'alarm' in data['settings']:
        alarm_config = data["settings"]["alarm"]

@sio.on('c_update_matrix')
def on_c_update_matrix(data):
    """Replace the zone polygons when the server pushes a matrix update for this monitor."""
    global area
    print('Received c_update_matrix', data)
    # Ignore updates addressed to other monitors.
    if data['mid'] != mid:
        return
    if 'matrix' in data:
        area = data["matrix"]

@sio.on('c_ping')
def on_c_ping(data):
    # Liveness check: answer the server's ping with a pong.
    print(f"Received Ping")
    sio.emit('c_pong', "pong")

# Connect to the local controller over websocket; the handlers above fire on its events.
sio.connect('http://127.0.0.1:3000', transports=['websocket'])


def draw_matrix_roi(frame, points, color, im0):
    """Draw the closed outline of one polygon ROI onto frame.

    im0 is accepted for interface compatibility but not used.
    """
    if len(points) <= 1:
        return  # nothing to outline with fewer than two vertices
    # Edges between consecutive vertices...
    for start, end in zip(points, points[1:]):
        cv2.line(frame, start, end, color, 2)
    # ...plus the closing edge back to the first vertex.
    cv2.line(frame, points[-1], points[0], color, 2)


def draw_matrix_overlay(frame, all_points, im0):
    """Blend translucent filled polygons for every ROI onto frame.

    :param all_points: iterable of (points, color) pairs
    :param im0: clean source image used as the fill canvas
    """
    alpha = 0.1  # overlay opacity
    canvas = im0.copy()
    for points, color in all_points:
        if len(points) <= 2:
            continue  # need at least three vertices to form a polygon
        # cv2.fillPoly expects int32 points shaped (-1, 1, 2).
        pts_array = np.array(points, np.int32).reshape((-1, 1, 2))
        cv2.fillPoly(canvas, [pts_array], color=color)
    # Blend the filled canvas over the original frame in place.
    cv2.addWeighted(canvas, alpha, frame, 1 - alpha, 0, frame)


# Main loop: consume both model streams in lockstep, one frame per iteration.
# r1: person/tracking model -> geofencing, peopleCounting, crowd, idleTime
# r2: face/mask model       -> mask, facial
for r1, r2 in zip(results1, results2):
    # Tell the server the pipeline is live (once, on the first frame).
    if not started:
        sio.emit('c_va_status', {'status': 'started', 'mid': mid})
        started = True

    frame = r1.orig_img  # current frame image
    detections1 = r1.boxes  # person boxes (with tracking ids)
    detections2 = r2.boxes  # face/mask boxes

    # geofencing
    if 'geofencing' in alarm_config and alarm_config['geofencing']['enabled']:
        threshold = alarm_config['geofencing']['threshold'] if 'threshold' in alarm_config['geofencing'] else 0
        geoFencing(detections1.cpu().xyxy.numpy(), threshold)

    # people counting
    if 'peopleCounting' in alarm_config and alarm_config['peopleCounting']['enabled']:
        peopleCounting(detections1.cpu().xyxy.numpy())

    # crowd
    if 'crowd' in alarm_config and alarm_config['crowd']['enabled']:
        threshold = alarm_config['crowd']['threshold'] if 'threshold' in alarm_config['crowd'] else 10
        crowdDetection(detections1.cpu().xyxy.numpy(), threshold)

    # facial
    if 'facial' in alarm_config and alarm_config['facial']['enabled']:
        facialDetection(detections2.cpu().xyxy.numpy())

    # mask
    if 'mask' in alarm_config and alarm_config['mask']['enabled']:
        maskDetection(detections2)

    # idle time
    if 'idleTime' in alarm_config and alarm_config['idleTime']['enabled']:
        threshold = alarm_config['idleTime']['threshold'] if 'threshold' in alarm_config['idleTime'] else 10
        idleTimeDetection(detections1, threshold)

    # motion (here the threshold doubles as the binarization sensitivity)
    if 'motion' in alarm_config and alarm_config['motion']['enabled']:
        threshold = alarm_config['motion']['threshold'] if 'threshold' in alarm_config['motion'] else 10
        motionDetection(frame, threshold)

    # Disabled: zone outline + translucent overlay rendering.
    if False:
        overlay_points = []
        for index, (key, points_list) in enumerate(area.items()): 
            color = colors[index] if index < len(colors) else (255, 255, 255)  # default white
            for points in points_list:
                draw_matrix_roi(frame, points, color, r1.orig_img)
                overlay_points.append((points, color))
        draw_matrix_overlay(frame, overlay_points, r1.orig_img)

    # Draw annotated boxes for both models onto the frame.
    process_detections(frame, detections1, (0, 255, 0), model1)
    process_detections(frame, detections2, (0, 255, 0), model2)

    # NOTE(review): the preview window is shown regardless of --show — confirm intended.
    cv2.namedWindow('fullscreen', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('fullscreen', 960, 480)  # default window size
    cv2.imshow("fullscreen", frame)
    # NOTE(review): ffmpeg was told frames are 3840x1920 bgr24 — confirm frame.shape
    # matches, otherwise the published RTSP stream will be corrupted.
    ffmpeg_process.stdin.write(frame.tobytes())
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break