import cv2

def detect_faces(frame, face_cascade):
    """Detect faces in a BGR frame using the given Haar cascade.

    Returns the sequence of (x, y, w, h) bounding boxes produced by
    detectMultiScale (empty when no faces are found).
    """
    # Haar cascades operate on single-channel images, so convert first.
    grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return face_cascade.detectMultiScale(
        grayscale,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
    )

def main():
    """Run a webcam preview that draws boxes around detected faces.

    Press 'q' in the display window to quit.
    """
    # Pre-trained frontal-face Haar cascade bundled with OpenCV.
    face_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    cap = cv2.VideoCapture(0)  # default camera
    if not cap.isOpened():
        print("Error: Could not open video capture.")
        return

    while True:
        ok, frame = cap.read()
        if not ok:
            print("Error: Could not read frame.")
            break

        # Outline every detected face with a blue, 2px-thick rectangle.
        for (x, y, w, h) in detect_faces(frame, face_cascade):
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        cv2.imshow('Face Detection', frame)

        # waitKey also pumps the HighGUI event loop; 'q' exits.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Free the camera and close any HighGUI windows.
    cap.release()
    cv2.destroyAllWindows()

# Entry point intentionally disabled: the PyQt6 application at the bottom of
# this file provides the active `__main__` guard instead.
# if __name__ == '__main__':
#     main()


import cv2
import numpy as np
from PyQt6 import QtCore, QtGui, QtWidgets
from face_detect import FaceDetect


class VideoThread(QtCore.QThread):
    """Background thread that grabs webcam frames and emits them.

    Each captured BGR frame (np.ndarray) is passed through the injected
    face-detection module and then emitted via `change_pixmap_signal`.
    """

    change_pixmap_signal = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, face_detect):
        super().__init__()
        self.cap = cv2.VideoCapture(0)  # default camera
        self.running = True
        self.face_detect = face_detect  # injected face-detection module

    def run(self):
        while self.running:
            flag, frame = self.cap.read()
            if flag:
                # Run face detection (presumably annotates the frame in
                # place — the result is not used here), then hand the frame
                # to the GUI thread via the signal.
                self.face_detect.detect(frame)
                self.change_pixmap_signal.emit(frame)
            else:
                # Back off briefly so a failing camera doesn't hot-spin.
                self.msleep(10)
        # Release the capture here, after the loop has fully exited, so that
        # release() can never race with an in-flight read() on this object.
        self.cap.release()

    def stop(self):
        """Ask the capture loop to finish and block until it has.

        Previously this released the capture directly while run() could
        still be mid-read on the same object (use-after-release race);
        now run() owns the release and wait() guarantees it completed.
        """
        self.running = False
        self.wait()


class MainWindow(QtWidgets.QMainWindow):
    """Main window that displays the live, face-annotated video stream."""

    def __init__(self, face_detect):
        super().__init__()
        self.initUI()
        # Start the capture thread and route each emitted frame to
        # update_image for display.
        self.video_thread = VideoThread(face_detect)
        self.video_thread.change_pixmap_signal.connect(self.update_image)
        self.video_thread.start()

    def initUI(self):
        """Build the window chrome and the label that hosts the video."""
        self.setWindowTitle("Video Stream")
        self.setGeometry(100, 100, 800, 600)

        # QLabel acting as the video canvas.
        self.face_input = QtWidgets.QLabel(self)
        self.face_input.setGeometry(100, 100, 640, 480)
        self.face_input.setAlignment(QtCore.Qt.AlignmentFlag.AlignCenter)

        self.show()

    def update_image(self, cv_img):
        """Render one BGR frame (np.ndarray) into the video label."""
        # Qt expects RGB byte order, OpenCV delivers BGR.
        rgb = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        height, width, channels = rgb.shape
        image = QtGui.QImage(
            rgb.data, width, height, channels * width,
            QtGui.QImage.Format.Format_RGB888)

        # Scale to the label's current size, preserving aspect ratio.
        pixmap = QtGui.QPixmap.fromImage(image)
        scaled = pixmap.scaled(
            self.face_input.width(),
            self.face_input.height(),
            QtCore.Qt.AspectRatioMode.KeepAspectRatio)
        self.face_input.setPixmap(scaled)

    def closeEvent(self, event):
        """Stop the capture thread before letting the window close."""
        self.video_thread.stop()
        event.accept()



if __name__ == '__main__':
    import sys

    # Boot the Qt application, wire up the detector, and enter the event loop.
    app = QtWidgets.QApplication(sys.argv)
    detector = FaceDetect()
    window = MainWindow(detector)
    sys.exit(app.exec())
