# import cv2

# # 加载 Haar Cascade 分类器（这需要你有一个 XML 文件）
# face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# # 从摄像头捕获视频
# cap = cv2.VideoCapture('/dev/came1')

# while True:
#     # 捕获每一帧
#     ret, frame = cap.read()

#     # 将帧转换为灰度图像（人脸检测通常需要灰度图）
#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

#     # 检测人脸
#     faces = face_cascade.detectMultiScale(gray, 1.1, 4)

#     # 画出每一个人脸的矩形框
#     for (x, y, w, h) in faces:
#         cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

#     # 显示结果
#     cv2.imshow('Face Detection', frame)

#     # 如果按下 'q' 键，则退出
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break

# # 释放摄像头资源
# cap.release()
# cv2.destroyAllWindows()

import cv2
import numpy as np
import time

# Initialize the Haar cascade face detector shipped with OpenCV.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Video capture initialization.
# NOTE(review): '/dev/came1' looks like a typo — confirm the intended device
# (e.g. '/dev/video1' or a numeric camera index such as 0).
cap = cv2.VideoCapture('/dev/came1')
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)  # lower resolution to improve throughput
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Performance-tuning parameters.
SCALE_FACTOR = 1.1      # detectMultiScale pyramid scale (larger = faster, may miss faces)
MIN_NEIGHBORS = 5       # detection confidence threshold (higher = fewer false positives)
ROTATION_ANGLES = np.arange(-180, 180, 15)  # 24 angles: -180° .. 165° in 15° steps
DETECTION_INTERVAL = 3  # run the full rotation sweep only every 3rd frame

def process_frame(frame, frame_count):
    """Detect faces in a frame by scanning multiple rotation angles.

    Every DETECTION_INTERVAL frames, runs the Haar cascade on rotated
    copies of the grayscale frame, maps each detection back into the
    original frame's coordinate system, and merges overlapping boxes
    with non-maximum suppression.

    Args:
        frame: BGR image as returned by ``VideoCapture.read()``.
        frame_count: running frame index; detection only runs when
            ``frame_count % DETECTION_INTERVAL == 0``.

    Returns:
        Array of ``[x, y, w, h]`` boxes in original-frame coordinates,
        or an empty list on skipped frames / when nothing is detected.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Keep image dimensions under distinct names: the original code reused
    # w/h here and they were shadowed by the per-face loop below, so boxes
    # were clipped against the face size instead of the image size.
    img_h, img_w = gray.shape
    all_faces = []

    # Only run the (expensive) full-angle sweep on every Nth frame.
    if frame_count % DETECTION_INTERVAL == 0:
        center = (img_w // 2, img_h // 2)
        for angle in ROTATION_ANGLES:
            # Rotate the image so tilted faces become upright for the cascade.
            M = cv2.getRotationMatrix2D(center, angle, 1.0)
            rotated = cv2.warpAffine(gray, M, (img_w, img_h))

            faces = face_cascade.detectMultiScale(
                rotated, scaleFactor=SCALE_FACTOR, minNeighbors=MIN_NEIGHBORS)

            # The inverse transform is per-angle, not per-face: compute it
            # once here instead of inside the face loop.
            M_inv = cv2.invertAffineTransform(M)
            for (x, y, w, h) in faces:
                # Map the four corners of the detection back into the
                # un-rotated frame.
                pts = np.array(
                    [[x, y], [x + w, y], [x + w, y + h], [x, y + h]],
                    dtype=np.float32)
                transformed = cv2.transform(
                    pts.reshape(-1, 1, 2), M_inv).reshape(-1, 2)

                # Axis-aligned bounding box of the rotated-back corners,
                # clipped to the image bounds.
                x_new = int(np.clip(transformed[:, 0].min(), 0, img_w))
                y_new = int(np.clip(transformed[:, 1].min(), 0, img_h))
                w_new = int(np.clip(transformed[:, 0].max() - x_new, 0, img_w - x_new))
                h_new = int(np.clip(transformed[:, 1].max() - y_new, 0, img_h - y_new))

                all_faces.append([x_new, y_new, w_new, h_new])

        # Merge duplicates of the same face found at different angles.
        if all_faces:
            boxes = np.array(all_faces)
            indices = cv2.dnn.NMSBoxes(
                boxes[:, :4].tolist(),
                [1.0] * len(boxes),  # all detections share the same confidence
                score_threshold=0.3,
                nms_threshold=0.3,
            )
            # NMSBoxes returns an empty tuple when nothing survives;
            # guard before flattening/indexing.
            if len(indices) > 0:
                return boxes[np.asarray(indices).flatten()]
    return []

# FPS bookkeeping. Seed with the current time so the first frame's FPS is
# not computed against 0 (the Unix epoch), which displayed as ~0 FPS.
prev_time = time.time()
frame_count = 0

while True:
    ret, frame = cap.read()
    if not ret:  # camera disconnected or end of stream
        break

    # Mirror horizontally for a natural selfie-style view.
    frame = cv2.flip(frame, 1)

    # Run detection (full rotation sweep only every DETECTION_INTERVAL frames).
    faces = process_frame(frame, frame_count)
    frame_count += 1

    # Draw each detected face with its top-left coordinate label.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, f'({x}, {y})', (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    # Overlay instantaneous FPS (the epsilon avoids division by zero).
    curr_time = time.time()
    fps = 1 / (curr_time - prev_time + 1e-6)
    prev_time = curr_time
    cv2.putText(frame, f'FPS: {int(fps)}', (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    cv2.imshow('Real-time Face Detection', frame)

    # Exit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()