import socketio
import subprocess
import argparse
from collections import deque
from ultralytics import YOLO, RTDETR
import cv2

import numpy as np
import time
import requests
import numpy as np
from threading import Thread
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor
from itertools import zip_longest

import logging


# Command-line interface: input source, thresholds, model selection and
# alert throttling for this monitor process.
parser = argparse.ArgumentParser(description='An example command line argument parser')

parser.add_argument('-i', '--input', help='source', required=True)  # video file / RTSP URL / camera index
parser.add_argument('-t', '--task', help='task', default='detect')
parser.add_argument('-s', '--show', help='show window', action='store_true')
parser.add_argument('-c', '--conf', help='conf', type=float, default=0.3)  # detection confidence threshold
parser.add_argument('-iou', '--iou', help='iou', type=float, default=0.4)  # NMS IoU threshold
parser.add_argument('-cls', '--cls', help='class filter', type=int, nargs='+', default=None)
parser.add_argument('-mid', '--mid', help='monitor id')
parser.add_argument('-m', '--models', nargs='+', choices=['people', 'face'], 
                    help='models to load: people and/or face', required=True)
parser.add_argument('-th', '--throttle', help='throttle seconds', type=int, default=10)

args = parser.parse_args()


# Monitor ID: explicit --mid, or derived from the last path segment of the input
mid = args.mid if args.mid else args.input.split("/")[-1]
# Whether the "started" status has been reported for this run
started = False
# matrix area: {zone_name: [polygon, ...]} received from the controller
area = {}
# alarm config received from the controller
alarm_config = {}
# ffmpeg push-stream subprocess handle (created lazily on the first frame)
ffmpeg_process  = None
# Whether to push annotated frames out via ffmpeg/RTSP
is_push_stream = True
# Whether to display a local preview window
is_show_screen = False  
# Whether to draw the configured matrix/ROI polygons on the frame
is_draw_matrix = True
# Cool-down (seconds) between repeated firings of the same alert
throttle_seconds = args.throttle


# Model handles; loaded selectively based on the --models argument
model1 = None
model2 = None

# Silence ultralytics' per-frame logging; only errors are reported
logging.getLogger('ultralytics').setLevel(logging.ERROR)


"""
# predict   track

:param imgsz: 推理时模型输入图像的尺寸，[height, width]
设置	                 会变形吗	                  推荐使用场景
imgsz = 640	            ❌ 不会变形（会 padding）	⚡ 通用推理（快 + 小）
imgsz = [960, 1920]	    ❌ 不会变形	                🎯 保留更多细节（适合小目标）

"""
common_args = {
    "source": args.input,         # 输入源，可以是视频文件、图片路径或摄像头等
    "stream": True,               # 是否启用流式处理（适用于视频或摄像头输入）
    "show": args.show,            # 是否实时显示推理结果窗口
    "vid_stride": 5,              # 每 N 帧检测一次，提高性能，适用于视频流
    "imgsz": 1920,                 # 推理时模型输入图像的尺寸，[height, width]
    "conf": args.conf,            # 置信度阈值，低于该值的检测框将被忽略
    "stream_buffer": False,       # 是否启用流缓冲，关闭可降低延迟
    "save": False,                # 是否保存检测结果到文件
    "iou": args.iou,              # IoU（交并比）阈值，用于非极大值抑制
    "half": True,                 # 是否使用半精度推理（float16），加速推理
    "classes": args.cls,          # 要检测的类别索引列表，仅检测这些类别
}

if 'people' in args.models:
    """
    yolo11n: 精度中，CPU消耗中 CPU 2~5%
    yolov8n: 精度高，CPU消耗高 CPU<8%
    yolov5nu: 精度低，CPU消耗低 CPU<2%
    """
    model1 = YOLO('yolo11n_openvino_model/')
    # model1 = YOLO('yolov8n_openvino_model/')
    # model1 = YOLO('yolov5nu_openvino_model/')
    results1 = model1.track(
        **common_args
    )
    print(f'##################### LOAD MODEL people')

if 'face' in args.models:
    model2 = YOLO('mask2_openvino_model/')
    results2 = model2.predict(
        **common_args
    )
    print(f'##################### LOAD MODEL face')


def draw_rounded_rectangle(image, top_left, bottom_right, color, radius, thickness):
    """Draw a rectangle with rounded corners onto *image* in place.

    Two overlapping axis-aligned rectangles form the straight edges and
    four 90-degree elliptical arcs form the corners.
    """
    x1, y1 = top_left
    x2, y2 = bottom_right

    # Straight edges: one rectangle inset horizontally, one inset vertically.
    cv2.rectangle(image, (x1 + radius, y1), (x2 - radius, y2), color, thickness)
    cv2.rectangle(image, (x1, y1 + radius), (x2, y2 - radius), color, thickness)

    # Corner arcs as (center, start angle): TL, TR, BL, BR.
    arc_specs = [
        ((x1 + radius, y1 + radius), 180),
        ((x2 - radius, y1 + radius), 270),
        ((x1 + radius, y2 - radius), 90),
        ((x2 - radius, y2 - radius), 0),
    ]
    for center, start_angle in arc_specs:
        cv2.ellipse(image, center, (radius, radius), start_angle, 0, 90, color, thickness)


def rounded_rectangle_corners(img, top_left, bottom_right, radius=1, color=(255, 255, 255), thickness=1, line_type=cv2.LINE_AA):
    """Draw a rounded rectangle (outline or filled) on *img*.

    :param radius: corner roundness as a fraction (0..1] of half the
                   shorter side; values above 1 are clamped to 1.
    :param thickness: line thickness; negative fills the shape.
    :return: the (mutated) image, for call chaining.
    """
    p1 = top_left
    p2 = (bottom_right[0], top_left[1])
    p3 = (bottom_right[0], bottom_right[1])
    p4 = (top_left[0], bottom_right[1])

    height = abs(bottom_right[1] - top_left[1])
    width = abs(top_left[0] - bottom_right[0])

    # radius is a fraction; cap it so corners never exceed half a side.
    if radius > 1:
        radius = 1

    corner_radius = int(radius * min(height / 2, width / 2))

    if thickness < 0:
        # Filled shape: cover the interior with three axis-aligned
        # rectangles (center strip plus left/right strips); the filled
        # corner arcs below complete the shape.
        top_left_main_rect = (int(p1[0] + corner_radius), int(p1[1]))
        bottom_right_main_rect = (int(p3[0] - corner_radius), int(p3[1]))

        top_left_rect_left = (p1[0], p1[1] + corner_radius)
        bottom_right_rect_left = (p4[0] + corner_radius, p4[1] - corner_radius)

        top_left_rect_right = (p2[0] - corner_radius, p2[1] + corner_radius)
        bottom_right_rect_right = (p3[0], p3[1] - corner_radius)

        all_rects = [
            [top_left_main_rect, bottom_right_main_rect],
            [top_left_rect_left, bottom_right_rect_left],
            [top_left_rect_right, bottom_right_rect_right]]

        # Plain loop instead of a side-effect-only list comprehension.
        for rect in all_rects:
            cv2.rectangle(img, rect[0], rect[1], color, thickness)

    # Corner arcs: TL, TR, BR, BL.
    cv2.ellipse(img, (p1[0] + corner_radius, p1[1] + corner_radius), (corner_radius, corner_radius), 180.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p2[0] - corner_radius, p2[1] + corner_radius), (corner_radius, corner_radius), 270.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p3[0] - corner_radius, p3[1] - corner_radius), (corner_radius, corner_radius), 0.0, 0, 90,
                color, thickness, line_type)
    cv2.ellipse(img, (p4[0] + corner_radius, p4[1] - corner_radius), (corner_radius, corner_radius), 90.0, 0, 90,
                color, thickness, line_type)

    return img


def draw_label(img, label, top_left, offset_y, bg_color=(50, 50, 50), font_color=(255, 255, 255)):
    """Render *label* in a semi-transparent rounded box anchored flush
    above *top_left*, clamped to the image bounds.
    (*offset_y* is accepted for API compatibility but unused.)
    """
    scale = 0.6
    stroke = 1
    face = cv2.FONT_HERSHEY_DUPLEX
    pad = 5     # padding around the text
    corner = 5  # rounded-corner radius

    text_w, text_h = cv2.getTextSize(label, face, scale, stroke)[0]

    # Anchor the box so it sits directly on top of the bounding box.
    box_tl = (top_left[0], top_left[1] - text_h - 2 * pad)
    box_br = (box_tl[0] + text_w + 2 * pad, box_tl[1] + text_h + 2 * pad)

    # Clamp so the label never leaves the image.
    box_tl = (max(0, box_tl[0]), max(0, box_tl[1]))
    box_br = (min(img.shape[1], box_br[0]), min(img.shape[0], box_br[1]))

    # Semi-transparent background: draw on a copy, then blend it back.
    canvas = img.copy()
    draw_rounded_rectangle(canvas, box_tl, box_br, bg_color, corner, -1)
    alpha = 0.7
    cv2.addWeighted(canvas, alpha, img, 1 - alpha, 0, img)

    # Foreground text.
    cv2.putText(img, label, (box_tl[0] + pad, box_tl[1] + text_h + pad),
                face, scale, font_color, stroke, lineType=cv2.LINE_AA)


# Human-readable alert names sent to the backend, keyed by internal code.
Alert = {
    'CROWD': 'Crowd Detection',
    'GEO': 'Geo Fencing',
    'FACIAL': 'Facial Detection',
    'MASK': 'Mask Detection',
    'IDLE_TIME': 'Idle Time',
    'MOTION': 'Motion Detection',
    'LINE_CROSSING': 'Line Crossing'
}
# Color palette (OpenCV BGR order) used to distinguish ROI polygons.
colors = [(50, 150, 255), (80, 180, 255), (100, 200, 255), (120, 255, 255),
          (140, 255, 100), (160, 255, 150), (0, 0, 255), (180, 255, 200), (200, 255, 200)]


def throttle():
    """Decorator factory that rate-limits the wrapped function.

    The wrapped function executes only when at least the (module-global)
    ``throttle_seconds`` have elapsed since its previous execution;
    otherwise the call is dropped and ``None`` is returned. The global is
    read at call time, so runtime updates to the cool-down take effect
    immediately. Each decorated function keeps its own timer.
    """
    def decorator(func):
        from functools import wraps  # function-scope: keeps this block self-contained
        last_executed_time = 0

        @wraps(func)  # preserve the wrapped function's name/docstring
        def wrapper(*args, **kwargs):
            nonlocal last_executed_time
            global throttle_seconds
            current_time = time.time()
            elapsed_time = current_time - last_executed_time
            # Use the *current* throttle_seconds value, not the one in
            # effect when the decorator was created.
            if elapsed_time >= throttle_seconds:
                result = func(*args, **kwargs)
                last_executed_time = current_time
                return result
        return wrapper
    return decorator


# One throttled entry point per alert type: each fires at most once per
# `throttle_seconds` window, independently of the others.
@throttle()
def triggerAlertGeoFencing():
    do_trigger_alert(mid, Alert['GEO'])


@throttle()
def triggerAlertCrowd():
    do_trigger_alert(mid, Alert['CROWD'])
    
    
@throttle()
def triggerAlertFacial():
    do_trigger_alert(mid, Alert['FACIAL'])
    
    
@throttle()
def triggerAlertMask():
    do_trigger_alert(mid, Alert['MASK'])
    
    
@throttle()
def triggerAlertIdleTime():
    do_trigger_alert(mid, Alert['IDLE_TIME'])
    

@throttle()
def triggerAlertMotion():
    do_trigger_alert(mid, Alert['MOTION'])


@throttle()
def triggerAlertLineCrossing():
    do_trigger_alert(mid, Alert['LINE_CROSSING'])
    
    
def do_trigger_alert(mid, alarm):
    """Notify the alarm backend that *alarm* fired on monitor *mid*.

    Best-effort: a timeout or network failure is logged rather than
    allowed to propagate and kill the main detection loop.
    """
    url = 'http://192.168.1.24:8080/alarms/trigger'
    data = {
        "alarm": alarm,
        "mid": mid,
    }
    try:
        # Bounded timeout so a slow backend cannot stall frame processing.
        requests.get(url, params=data, timeout=5)
        print(f'##################### TRIGGER ALERT: {alarm} {mid}')
    except requests.RequestException as e:
        print(f'##################### TRIGGER ALERT FAILED: {alarm} {mid} {e}')


def process_detections(frame, detections, color, mod):
    """Draw a rounded box + label for each detection and collect crops.

    :param frame: BGR image, annotated in place
    :param detections: ultralytics Boxes for one frame
    :param color: BGR border color
    :param mod: the model that produced *detections* (for class names)
    :return: (cropped images, (x1, y1, x2, y2) boxes); both empty when
             rendering is disabled.
    """
    # Nothing consumes the annotations when neither the preview window nor
    # the push stream is active, so skip the drawing work entirely.
    # (Fixed to return a consistent 2-tuple instead of None.)
    if not is_show_screen and not is_push_stream:
        return [], []
    human_images = []
    human_boxes = []
    for detection in detections:
        x1, y1, x2, y2 = map(int, detection.xyxy[0])
        human_images.append(frame[y1:y2, x1:x2])
        human_boxes.append((x1, y1, x2, y2))

        # Class name and confidence for the label text.
        class_id = int(detection.cls.item())
        class_name = mod.names[class_id]
        confidence = float(detection.conf.item())

        # Tracking id (0 when the detector did not assign one).
        track_id = int(detection.id.item()) if detection.id is not None else 0
        label = f"{class_name} {confidence:.2f}" + (f" ID:{track_id}" if track_id != 0 else "")

        rounded_rectangle_corners(frame, (x1, y1), (x2, y2), radius=0.5, color=color, thickness=2)
        draw_label(frame, label, (x1, y1), 0,  bg_color=(0, 255, 0), font_color=(0, 0, 0))
    return human_images, human_boxes


# threshold: N秒后触发报警
# Dwell-time state for the geofencing alarm (single shared timer).
# threshold: alarm fires after a target stays N seconds inside the zone.
personGeoObj = { 'current_time' : 0, 'entry_time' : 0, 'last_entry_time' : 0 }
def geoFencing(human_boxes, threshold):
    """Fire the geo-fencing alert when any person's foot point stays inside
    a configured "geofencing" polygon for at least *threshold* seconds.

    :param human_boxes: iterable of (x1, y1, x2, y2) person boxes
    :param threshold: dwell time (seconds) before the alert triggers
    """
    global area, personGeoObj
    if "geofencing" not in area or len(area["geofencing"]) == 0:
        # No geofencing zones configured: reset any accumulated state.
        personGeoObj = {'current_time': 0, 'entry_time': 0, 'last_entry_time': 0}
        return

    current_time = time.time()
    inside_zone = False  # whether anybody is inside a zone this frame

    # Use each box's bottom-center ("foot") point as the test point.
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    
    for center in centers:
        polygons = [np.array(polygon, dtype=np.int32) for polygon in area["geofencing"]]
        for polygon in polygons:
            result = cv2.pointPolygonTest(polygon, center, False)
            if result >= 0:
                inside_zone = True

                # Target seen inside a zone: refresh the detection times.
                personGeoObj['current_time'] = current_time
                personGeoObj['last_entry_time'] = current_time  # last time a target was seen inside
                
                # First entry: remember when the dwell started.
                if personGeoObj['entry_time'] == 0:
                    personGeoObj['entry_time'] = current_time
                
                # How long the target has been inside so far.
                elapsed_time = current_time - personGeoObj['entry_time']
                
                print(f"Elapsed time in zone: {elapsed_time:.2f}s, Threshold: {threshold}s")

                # Dwell exceeded the threshold: raise the alert, then reset.
                if elapsed_time >= threshold:
                    triggerAlertGeoFencing()
                    personGeoObj = {'current_time': 0, 'entry_time': 0, 'last_entry_time': 0}
                
                break  # this target matched a zone; check the next target

    # Target-lost handling: if nobody is inside this frame and the last
    # sighting was more than 2 seconds ago, reset the dwell timer.
    if not inside_zone and personGeoObj['last_entry_time'] > 0:
        if current_time - personGeoObj['last_entry_time'] >= 2:
            personGeoObj = {'current_time': 0, 'entry_time': 0, 'last_entry_time': 0}
            print("Target lost for 2 seconds, resetting timer.")
                    

# threshold: N秒后触发报警
# Per-track idle-time state: {track_id: {'entry_time': timestamp}}
# threshold: alarm fires after a tracked person stays N seconds in a zone.
idleTimeObj = {}
def idleTimeDetection(detections, threshold):
    """Fire the idle-time alert when a tracked person's foot point stays
    inside a configured "idleTime" polygon longer than *threshold* seconds.

    :param detections: ultralytics Boxes with tracking ids
    :param threshold: dwell time (seconds) before the alert triggers
    """
    global area
    if "idleTime" not in area or len(area["idleTime"]) == 0:
        return
    human_boxes = detections.cpu().xyxy.numpy()
    # Bottom-center ("foot") point of each box.
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    for i, center in enumerate(centers):
        midpoint_c_x, midpoint_c_y = center
        midpoint_c_xy = (int(midpoint_c_x), int(midpoint_c_y))

        polygons = [np.array(polygon, dtype=np.int32) for polygon in area["idleTime"]]
        in_area = False

        # Tracking id for this detection (0 means "not tracked").
        track_id = int(detections[i].id.item()) if detections[i].id is not None else 0
        # Untracked targets cannot accumulate dwell time; skip them.
        if track_id == 0:
            continue
        
        for idx, polygon in enumerate(polygons):
            result = cv2.pointPolygonTest(polygon, midpoint_c_xy, False)
            if result >= 0:
                in_area = True
                break
            
        # Outside every zone: drop any stored entry time and move on.
        if not in_area:
             # Person left the zone: forget their entry-time record.
            if track_id in idleTimeObj:
                del idleTimeObj[track_id]
            continue

        # Inside a zone: record the entry time on first sight.
        if track_id not in idleTimeObj:
            idleTimeObj[track_id] = { 'entry_time': time.time() }
        # Dwell time so far; fire once it exceeds the threshold
        # (triggerAlertIdleTime is throttled, so repeats are rate-limited).
        stay_time = time.time() - idleTimeObj[track_id]["entry_time"]
        if stay_time > threshold:
            triggerAlertIdleTime()
        
            
# 用于控制每秒发一次数据
# Rate limiter so the people count is emitted at most once per second.
last_emit_time = 0  # when the count was last emitted
emit_seconds = 1    # emit interval (seconds)
def peopleCounting(human_boxes):
    """Emit the current (zone-filtered) people count over Socket.IO,
    at most once every `emit_seconds`.

    :param human_boxes: array of (x1, y1, x2, y2) person boxes
    """
    global last_emit_time
    # Current timestamp.
    current_time = time.time()
    
    count = do_count_people(human_boxes)
                    
    # Emit at most once per emit_seconds.
    if current_time - last_emit_time >= emit_seconds:  # 1 second
        sio.emit('c_people_counting', {'count': count, 'mid': mid})
        last_emit_time = current_time  # remember when we last emitted
        
        
def crowdDetection(human_boxes, threshold):
    """Raise the crowd alert when the (zone-filtered) person count
    reaches *threshold*."""
    if do_count_people(human_boxes) >= threshold:
        triggerAlertCrowd()
    
        
def facialDetection(human_boxes):
    """Raise the facial alert when a face is detected — anywhere in the
    frame if no "facial" zone is configured, otherwise only when a box's
    bottom-center point lies inside one of the configured polygons.

    :param human_boxes: array of (x1, y1, x2, y2) face boxes
    """
    global area
    # No zone configured: any detection triggers (throttled).
    if "facial" not in area or len(area["facial"]) == 0:
        if len(human_boxes) > 0:
            triggerAlertFacial()
    # Zones configured: test each box's bottom-center point against them.
    else:
        # Build the polygon arrays once, not once per detection.
        polygons = [np.array(polygon, dtype=np.int32) for polygon in area["facial"]]
        centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
        for center in centers:
            for polygon in polygons:
                if cv2.pointPolygonTest(polygon, center, False) >= 0:
                    triggerAlertFacial()
     
                    
def maskDetection(detections):
    """Raise the mask alert when an unmasked face (class 0) is detected —
    anywhere in the frame if no "mask" zone is configured, otherwise only
    when the face's bottom-center point lies inside a configured polygon.

    Labels: {0: 'face', 1: 'face_masked'}

    :param detections: ultralytics Boxes from the face/mask model
    """
    global area
    # No zone configured: any bare face triggers (throttled).
    if "mask" not in area or len(area["mask"]) == 0:
        for detection in detections:
            if int(detection.cls.item()) == 0:
                triggerAlertMask()
    # Zones configured: only bare faces inside a zone trigger.
    else:
        human_boxes = detections.cpu().xyxy.numpy()
        centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
        # Build the polygon arrays once, not once per detection.
        polygons = [np.array(polygon, dtype=np.int32) for polygon in area["mask"]]
        for i, center in enumerate(centers):
            for polygon in polygons:
                if cv2.pointPolygonTest(polygon, center, False) >= 0:
                    # BUGFIX: the class must come from detection i — the one
                    # whose center was tested — not from the polygon index
                    # as the original `detections[idx]` did.
                    if int(detections[i].cls.item()) == 0:
                        triggerAlertMask()


# Motion-detection state: grayscale reference frame and when it was last
# refreshed.
previous_frame = None
last_update_time = time.time()
def motionDetection(frame, sensitivity, interval=2):
    """
    Motion detection (checks whether the target area contains movement).

    :param frame: current BGR frame
    :param sensitivity: sensitivity 1~100 (higher = more easily triggered)
                       Sensitivity bands:
                       - very low:  1-15  (large motion required)
                       - low:       16-30 (substantial motion required)
                       - mid-low:   31-45 (moderate-to-low motion required)
                       - medium:    46-60 (balanced sensitivity)
                       - mid-high:  61-75 (moderate motion triggers)
                       - high:      76-85 (small motion triggers)
                       - very high: 86-100 (slight motion triggers)
    :param interval: reference-frame refresh interval (seconds)
    :return: True when motion was detected (and the alert fired), else False
    """
    global previous_frame, last_update_time, area

    # Convert to grayscale and denoise.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Larger blur kernel to suppress more sensor noise.
    gray = cv2.GaussianBlur(gray, (7, 7), 0)

    # Refresh the reference frame periodically.
    if previous_frame is None or (time.time() - last_update_time) > interval:
        previous_frame = gray
        last_update_time = time.time()
        return False  # reference just refreshed; skip detection this frame

    # Per-pixel difference between the reference and the current frame.
    frame_delta = cv2.absdiff(previous_frame, gray)
    
    # Map sensitivity to a binarization threshold: higher sensitivity ->
    # lower threshold -> more pixels count as motion.
    threshold_value = 100 - sensitivity  # ranges from 99 down to 0
    _, thresh = cv2.threshold(frame_delta, threshold_value, 255, cv2.THRESH_BINARY)
    
    # Dilate to merge nearby motion patches and reduce false alarms from
    # fragmented contours.
    thresh = cv2.dilate(thresh, None, iterations=3)
    
    # Morphological open/close to strip remaining speckle noise.
    kernel = np.ones((5, 5), np.uint8)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # Extract motion contours.
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    
    # Ignore tiny contours.
    min_contour_area = 100  # minimum contour area
    contours = [c for c in contours if cv2.contourArea(c) > min_contour_area]

    # Map sensitivity to the motion-area threshold: higher sensitivity ->
    # lower threshold -> triggers on less motion.
    motion_threshold = max(500, 100 * (100 - sensitivity))  # ranges 9900 down to 500

    # 1) Whole-frame detection when no "motion" zones are configured.
    if "motion" not in area or len(area["motion"]) == 0:
        total_motion_area = sum(cv2.contourArea(c) for c in contours)
        # print("###### motion:", total_motion_area, motion_threshold)
        if total_motion_area > motion_threshold:
            triggerAlertMotion()
            return True
        return False

    # 2) Zone-restricted detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["motion"]]
    
    # A motion contour must overlap a zone by at least this fraction of its
    # own area to count, filtering out marginal overlaps.
    overlap_ratio_threshold = 0.1  # overlap must be >= 10% of the motion area
    
    # Mask of the configured zones (built but not used in the decision below).
    mask = np.zeros_like(gray)
    for polygon in polygons:
        cv2.fillPoly(mask, [polygon], 255)
    
    for contour in contours:
        # Bounding rectangle of the motion contour, as a quad.
        x, y, w, h = cv2.boundingRect(contour)
        motion_box = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]], dtype=np.int32)
        
        # Area of the current contour.
        contour_area = cv2.contourArea(contour)
        
        # Test the contour's bounding quad against every zone polygon.
        for polygon in polygons:
            # Intersection area between the quad and the zone.
            # NOTE(review): cv2.intersectConvexConvex assumes convex input —
            # confirm configured zones are convex polygons.
            intersection_area = cv2.intersectConvexConvex(motion_box, polygon)[0]
            
            # Fraction of the motion area lying inside the zone.
            overlap_ratio = intersection_area / contour_area if contour_area > 0 else 0
            
            # Motion counts only when the overlap is large in absolute terms
            # AND a meaningful fraction of the contour.
            if intersection_area > motion_threshold and overlap_ratio > overlap_ratio_threshold:
                # print("###### motion:", intersection_area, motion_threshold, overlap_ratio, overlap_ratio_threshold)
                triggerAlertMotion()
                return True
        
    return False


# 用于存储人员穿越线的状态
# Per-track crossing state: {track_id: {'last_position': (x, y), 'crossed': bool}}
line_crossing_states = {}
def lineCrossingDetection(detections, direction):
    """Detect tracked people crossing any configured "lineCrossing" line.

    :param detections: ultralytics Boxes with tracking ids
    :param direction: 'entry' or 'exit' to alert on one crossing direction
                      only; any other value (or None) alerts on both.
    """
    global line_crossing_states

    if "lineCrossing" not in area or len(area["lineCrossing"]) == 0:
        return

    # Bottom-center point of every detection box.
    boxes = detections.cpu().xyxy.numpy()
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in boxes]

    # Tracking ids (0 means "not tracked").
    track_ids = [int(det.id.item()) if det.id is not None else 0 for det in detections]

    for center, track_id in zip(centers, track_ids):
        if track_id == 0:  # skip untracked targets
            continue

        # First sighting of this track: just record its position.
        if track_id not in line_crossing_states:
            line_crossing_states[track_id] = {
                'last_position': center,
                'crossed': False
            }
            continue

        last_pos = line_crossing_states[track_id]['last_position']

        for line in area["lineCrossing"]:
            if len(line) < 2:
                continue
            # 2-D cross product of the line direction with the vectors from
            # the line origin to the current / previous positions, computed
            # component-wise (np.cross on 2-D vectors is deprecated in
            # NumPy 2.0).
            lvx = line[1][0] - line[0][0]
            lvy = line[1][1] - line[0][1]
            cross_product = lvx * (center[1] - line[0][1]) - lvy * (center[0] - line[0][0])
            last_cross_product = lvx * (last_pos[1] - line[0][1]) - lvy * (last_pos[0] - line[0][0])

            # A sign change means the track moved across the line.
            if (cross_product * last_cross_product) < 0:
                if not line_crossing_states[track_id]['crossed']:
                    line_crossing_states[track_id]['crossed'] = True

                    # BUGFIX: with a direction filter configured, only the
                    # matching crossing direction may alert. The original
                    # else-branch also fired for the *wrong* direction.
                    if direction == 'entry':
                        if cross_product > 0:
                            triggerAlertLineCrossing()
                    elif direction == 'exit':
                        if cross_product < 0:
                            triggerAlertLineCrossing()
                    else:  # no direction filter: alert on any crossing
                        triggerAlertLineCrossing()

        # Remember this frame's position for the next comparison.
        line_crossing_states[track_id]['last_position'] = center


def do_count_people(human_boxes):
    """Count people, over the whole frame or restricted to the configured
    "peopleCounting" polygons (a person counts once per polygon containing
    the bottom-center point of their box).

    :param human_boxes: array (or sequence) of (x1, y1, x2, y2) boxes
    :return: the people count
    """
    # Empty input: nothing to count. len() works for both numpy arrays and
    # plain sequences, and does not misreport an all-zero box the way the
    # previous `.any()` check did.
    if len(human_boxes) == 0:
        return 0
    # No zone configured: count every detection.
    if "peopleCounting" not in area or len(area["peopleCounting"]) == 0:
        return len(human_boxes)
    # Zones configured: count bottom-center points inside each polygon.
    # Build the polygon arrays once, not once per detection.
    polygons = [np.array(polygon, dtype=np.int32) for polygon in area["peopleCounting"]]
    centers = [(int((x1 + x2) / 2), int(y2)) for x1, y1, x2, y2 in human_boxes]
    count = 0
    for center in centers:
        for polygon in polygons:
            if cv2.pointPolygonTest(polygon, center, False) >= 0:
                count += 1
    return count


# Socket.IO client: reports status/counters and receives settings updates
# from the local controller. TLS verification is disabled on the session.
requests.packages.urllib3.disable_warnings()
http_session = requests.Session()
http_session.verify = False
sio = socketio.Client(http_session=http_session)

@sio.event
def connect():
    print('connected')
    # init: announce ourselves and request our initial settings
    sio.emit('c_va_status', {'status': 'connected', 'mid': mid})
    sio.emit('c_va_init', {'mid': mid})

@sio.event
def connect_error(err):
    print(err)

# Settings push: includes both alarm config and matrix (ROI) polygons.
@sio.on('c_update_settings')
def on_c_update_alarm(data):
    global area, alarm_config
    print('Received c_update_settings', data)
    if data['mid'] != mid:
        return
    if 'matrix' in data['settings']:
        area = data["settings"]["matrix"]
    if 'alarm' in data['settings']:
        alarm_config = data["settings"]["alarm"]
        # Alert cool-down (seconds) can be changed at runtime.
        if 'throttle' in alarm_config:
            global throttle_seconds
            throttle_seconds = alarm_config['throttle']

@sio.on('c_update_matrix')
def on_c_update_matrix(data):
    global area
    print('Received c_update_matrix', data)
    if data['mid'] != mid:
        return
    area = data["matrix"] if 'matrix' in data else area

@sio.on('c_ping')
def on_c_ping(data):
    # Liveness check from the controller.
    print(f"Received Ping")
    sio.emit('c_pong', "pong")

sio.connect('http://127.0.0.1:3000', transports=['websocket'])


def draw_matrix_roi(frame, points, color, im0):
    """Outline a polygon ROI on *frame* by connecting consecutive points
    and closing the shape back to the first point.
    (*im0* is accepted for API compatibility but unused.)
    """
    if len(points) > 1:
        # Pair each vertex with its successor, wrapping back to the start.
        for start, end in zip(points, list(points[1:]) + [points[0]]):
            cv2.line(frame, start, end, color, 2)


def draw_matrix_overlay(frame, all_points, im0):
    """Blend translucent filled ROI polygons onto *frame* in place.

    :param all_points: iterable of (points, color) pairs
    :param im0: base image copied to build the overlay
    """
    alpha = 0.1  # overlay opacity
    scratch = im0.copy()
    for points, color in all_points:
        # A fillable polygon needs at least three vertices.
        if len(points) <= 2:
            continue
        contour = np.array(points, np.int32).reshape((-1, 1, 2))
        cv2.fillPoly(scratch, [contour], color=color)
    # Composite the overlay back onto the original frame.
    cv2.addWeighted(scratch, alpha, frame, 1 - alpha, 0, frame)
    

def do_push_stream(frame):
    """Pipe a raw BGR frame into an ffmpeg subprocess that republishes it
    as an RTSP stream. The subprocess is spawned lazily on the first call,
    sized from that frame's dimensions.

    NOTE(review): if ffmpeg dies, the next stdin write raises
    BrokenPipeError and the process is never restarted — consider
    monitoring `ffmpeg_process.poll()`.
    """
    global ffmpeg_process
    if not ffmpeg_process:
        frame_height, frame_width = frame.shape[:2]
        ffmpeg_command = [
            'ffmpeg',
            '-y',  # overwrite output
            '-loglevel', 'quiet',      # suppress ffmpeg logging
            '-f', 'rawvideo',  # input format: raw video
            '-pix_fmt', 'bgr24',  # input pixel format
            '-s', f"{frame_width}x{frame_height}",  # input frame size
            '-r', str(15),  # frame rate
            '-i', '-',  # read input from stdin
            '-c:v', 'libx264',  # encode with x264
            '-preset', 'ultrafast',  # encoder preset
            '-tune', 'zerolatency',  # minimize latency
            '-g', '5',  # keyframe interval
            '-f', 'rtsp',  # output format: RTSP
            f'rtsp://192.168.1.24:8554/{mid}_va'  # RTSP publish address
        ]
        ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE)
    ffmpeg_process.stdin.write(frame.tobytes())


# Main processing loop. zip_longest pairs frames from both model
# generators; a model that is not loaded contributes None (fillvalue).
# r1: geofencing, peopleCounting, crowd, lineCrossing 
# r2: mask, facial
for r1, r2 in zip_longest(results1 if model1 else [], results2 if model2 else [], fillvalue=None):
    if not started:
        # Report "started" once, on the first processed frame.
        sio.emit('c_va_status', {'status': 'started', 'mid': mid})
        started = True
        
    # Original frame from whichever result is available this iteration.
    frame = r1.orig_img if r1 is not None else r2.orig_img if r2 is not None else None
    
    # Person-based detections run only when model1 produced a result.
    if r1:
        detections1 = r1.boxes
        # geofencing
        if 'geofencing' in alarm_config and alarm_config['geofencing']['enabled']:
            threshold = alarm_config['geofencing']['threshold'] if 'threshold' in alarm_config['geofencing'] else 0
            geoFencing(detections1.cpu().xyxy.numpy(), threshold)
        
        # people counting
        if 'peopleCounting' in alarm_config and alarm_config['peopleCounting']['enabled']:
            peopleCounting(detections1.cpu().xyxy.numpy())
        
        # crowd
        if 'crowd' in alarm_config and alarm_config['crowd']['enabled']:
            threshold = alarm_config['crowd']['threshold'] if 'threshold' in alarm_config['crowd'] else 10
            crowdDetection(detections1.cpu().xyxy.numpy(), threshold)
            
        # idle time
        if 'idleTime' in alarm_config and alarm_config['idleTime']['enabled']:
            threshold = alarm_config['idleTime']['threshold'] if 'threshold' in alarm_config['idleTime'] else 10
            idleTimeDetection(detections1, threshold)
    
        # line crossing 
        if 'lineCrossing' in alarm_config and alarm_config['lineCrossing']['enabled']:
            direction = alarm_config['lineCrossing'].get('threshold', None)
            lineCrossingDetection(detections1, direction)

        # Draw boxes/labels for person detections.
        process_detections(frame, detections1, (0, 255, 0), model1)
        
    elif 'peopleCounting' in alarm_config and alarm_config['peopleCounting']['enabled']:
        # No person result this iteration: still emit a zero count.
        peopleCounting(np.array([]))
    
    # Mask / face detections run only when model2 produced a result.
    if r2:
        detections2 = r2.boxes
        # facial
        if 'facial' in alarm_config and alarm_config['facial']['enabled']:
            facialDetection(detections2.cpu().xyxy.numpy())
        
        # mask
        if 'mask' in alarm_config and alarm_config['mask']['enabled']:
            maskDetection(detections2)
            
        # Draw boxes/labels for face detections.
        process_detections(frame, detections2, (0, 255, 0), model2)
    
    # Motion detection is frame-based and needs neither model.
    if 'motion' in alarm_config and alarm_config['motion']['enabled']:
        threshold = alarm_config['motion']['threshold'] if 'threshold' in alarm_config['motion'] else 40
        motionDetection(frame, threshold)

    # Draw the configured matrix/ROI polygons and their translucent fill.
    if is_draw_matrix:
        overlay_points = []
        for index, (key, points_list) in enumerate(area.items()): 
            color = colors[index] if index < len(colors) else (255, 255, 255)  # default: white
            for points in points_list:
                draw_matrix_roi(frame, points, color, frame)
                overlay_points.append((points, color))
        draw_matrix_overlay(frame, overlay_points, frame)
    # Local preview window.
    if is_show_screen:
        cv2.namedWindow('fullscreen', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('fullscreen', 960, 480)  # default window size
        cv2.imshow("fullscreen", frame)
    # Push the annotated frame out via ffmpeg/RTSP.
    if is_push_stream:
        do_push_stream(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break