import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QGridLayout
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QTimer
import cv2
import torch
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
import vgg
from datetime import datetime

class WebcamWidget(QWidget):
    """Webcam preview widget with Haar-cascade face detection and CNN identity
    recognition.

    Each captured frame is scanned for faces; every face is classified by a
    small VGG model and framed with a name label plus an Authorized /
    Unauthorized status (green box for authorized people, red otherwise).
    """

    def __init__(self, parent=None):
        super(WebcamWidget, self).__init__(parent)

        self.camera_idx = 0        # OpenCV device index opened on Start
        self.is_camera_on = False
        self.model = self.load_model()

        # The cascade and the preprocessing pipeline never change, so build
        # them once here instead of per frame / per face (the original
        # reloaded the cascade XML from disk on every frame).
        # cv2.data.haarcascades is the cascade directory bundled with OpenCV,
        # which avoids a machine-specific absolute install path.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml")
        self.preprocess = transforms.Compose([
            transforms.Resize((128, 128)),
            transforms.ToTensor(),
            # NOTE(review): Normalize was commented out in the original —
            # presumably the model was trained on raw [0, 1] tensors; confirm
            # before re-enabling normalization.
        ])

        # Capture is created lazily in toggle_camera; one reusable timer
        # drives the frame updates.
        self.camera_capture = None
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)

        self.video_label = QLabel(self)
        self.video_label.setAlignment(Qt.AlignCenter)

        self.toggle_button = QPushButton('Start', self)
        self.toggle_button.setCheckable(True)
        self.toggle_button.toggled.connect(self.toggle_camera)

        # Known identities (model class index -> name) and the subset of
        # names that count as authorized.
        self.auth = ["zhj"]
        self.people_lists = ["zhj", "whc"]

        self.init_layout()

    def init_layout(self):
        """Arrange the video preview above the start/stop button."""
        main_layout = QVBoxLayout(self)

        grid_layout = QGridLayout()
        # Center the camera feed inside the grid cell.
        grid_layout.addWidget(self.video_label, 0, 0, 1, 1, Qt.AlignCenter)
        main_layout.addLayout(grid_layout)

        # Nested horizontal layout holding the Start/Stop button.
        switch_layout = QHBoxLayout()
        switch_layout.addWidget(self.toggle_button)
        main_layout.addLayout(switch_layout)

    def toggle_camera(self, state):
        """Start or stop capturing in response to the checkable button.

        state -- True when toggled on (start capture), False to stop.
        """
        if state:
            self.camera_capture = cv2.VideoCapture(self.camera_idx)
            if not self.camera_capture.isOpened():
                # Camera unavailable: clean up and pop the button back out.
                self.camera_capture.release()
                self.camera_capture = None
                self.toggle_button.setChecked(False)
                return
            self.toggle_button.setText('Stop')
            self.is_camera_on = True
            self.timer.start(30)  # 30 ms refresh interval (~33 fps)
        else:
            self.toggle_button.setText('Start')
            self.is_camera_on = False
            # Stop the timer *before* releasing the capture so a pending
            # timeout cannot read from a released device.
            self.timer.stop()
            if self.camera_capture is not None:
                self.camera_capture.release()
                self.camera_capture = None

    def update_frame(self):
        """Grab one frame, run detection + recognition, and repaint."""
        ret, frame = self.camera_capture.read()
        if ret:
            faces = self.detect_faces(frame)
            identities = self.recognize_faces(frame, faces, self.model)
            self.draw_faces(frame, faces, identities)
            self.display_frame(frame)

    def display_frame(self, frame):
        """Overlay a timestamp, center-crop/scale the frame to fill the
        widget while preserving aspect ratio, and show it on the label.

        frame -- BGR uint8 image as returned by VideoCapture.read().
        """
        main_window_width = self.width()
        main_window_height = self.height()
        if main_window_width <= 0 or main_window_height <= 0:
            return  # widget not laid out yet; avoid division by zero below

        camera_frame_height, camera_frame_width = frame.shape[:2]

        # Stamp the current wall-clock time onto the frame.
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        cv2.putText(frame, current_time, (320, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        # Center-crop the frame to the widget's aspect ratio.
        window_ratio = main_window_width / main_window_height
        frame_ratio = camera_frame_width / camera_frame_height
        if window_ratio > frame_ratio:
            crop_width = camera_frame_width
            crop_height = int(camera_frame_width / window_ratio)
            x_offset = 0
            y_offset = (camera_frame_height - crop_height) // 2
        else:
            crop_width = int(camera_frame_height * window_ratio)
            crop_height = camera_frame_height
            x_offset = (camera_frame_width - crop_width) // 2
            y_offset = 0
        if crop_width <= 0 or crop_height <= 0:
            return  # degenerate geometry; nothing sensible to display

        cropped_frame = frame[y_offset:y_offset + crop_height,
                              x_offset:x_offset + crop_width]

        # Scale so the cropped region fills the whole widget.
        scale_ratio = max(main_window_width / crop_width,
                          main_window_height / crop_height)
        scaled_frame = cv2.resize(cropped_frame, None,
                                  fx=scale_ratio, fy=scale_ratio)

        # OpenCV frames are BGR; Qt expects RGB.
        rgb_frame = cv2.cvtColor(scaled_frame, cv2.COLOR_BGR2RGB)
        height, width, _ = rgb_frame.shape
        q_image = QImage(rgb_frame.data, width, height, 3 * width,
                         QImage.Format_RGB888)
        self.video_label.setPixmap(QPixmap.fromImage(q_image))

        # Make the label cover the whole widget area.
        self.video_label.setGeometry(0, 0,
                                     main_window_width, main_window_height)

    def detect_faces(self, frame):
        """Return detected face rectangles as an array of (x, y, w, h)."""
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return self.face_cascade.detectMultiScale(
            gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

    def load_model(self, weights_path="C:/Users/王昊宸/Downloads/face_recognition-master/face_reco/pkl/face_bk_cpu.pkl"):
        """Build the small_VGG16 recognizer and load pretrained CPU weights.

        weights_path -- path to the saved state dict; the default preserves
        the original hard-coded location, pass your own to relocate it.
        """
        model = vgg.VGG('small_VGG16')
        model.load_state_dict(torch.load(weights_path))
        model.eval()  # inference mode: freezes dropout / batch-norm stats
        return model

    def recognize_faces(self, frame, faces, model):
        """Classify each detected face and return one label string per face.

        Returns "Unknown" for low-confidence predictions, otherwise the
        person's name followed by "\\nAuthorized" or "\\nUnauthorized".
        """
        identities = []
        for (x, y, w, h) in faces:
            # Crop the face region and convert BGR -> RGB for PIL/torch.
            face_img = frame[y:y + h, x:x + w]
            pil_image = Image.fromarray(
                cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB))
            tensor_img = self.preprocess(pil_image).unsqueeze(0)  # batch dim
            with torch.no_grad():
                output = model(tensor_img)
                # Min-max normalize the raw scores into [0, 1].
                # NOTE(review): for a single output row the normalized max is
                # always exactly 1.0, so the `< 0.96` rejection below can
                # never fire — a softmax probability threshold is likely what
                # was intended; kept as-is to preserve current behavior.
                min_value = torch.min(output)
                max_value = torch.max(output)
                normalized_output = (output - min_value) / (max_value - min_value)
                max_val, predicted = torch.max(normalized_output.data, 1)
                max_val = max_val.item()
                predicted = predicted.item()
            if max_val < 0.96:
                identity = "Unknown"
            else:
                identity = self.people_lists[int(predicted)]
                if identity in self.auth:
                    identity += "\nAuthorized"
                else:
                    identity += "\nUnauthorized"
            identities.append(identity)
        return identities

    def draw_faces(self, frame, faces, identities):
        """Draw a bounding box and multi-line label for each face.

        Green box/text when the recognized name is authorized, red otherwise.
        """
        font = cv2.FONT_HERSHEY_SIMPLEX
        for (x, y, w, h), identity in zip(faces, identities):
            # The identity may carry a second "Authorized"/"Unauthorized" line.
            lines = identity.split("\n")

            if any(name in lines for name in self.auth):
                color = (0, 255, 0)  # green: authorized
            else:
                color = (0, 0, 255)  # red: unknown or unauthorized

            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)

            # Stack the label lines, left-aligned, starting above the box.
            text_y = y - 10
            for line in lines:
                text_height = cv2.getTextSize(line, font, 0.9, 2)[0][1]
                cv2.putText(frame, line, (x, text_y), font, 0.9, color, 2)
                text_y += text_height + 10  # line spacing

if __name__ == "__main__":
    app = QApplication(sys.argv)

    # Top-level window hosting the webcam widget.
    window = QWidget()
    window.setWindowTitle("Face Recognition")
    window.setFixedSize(1000, 1200)  # fixed initial window size

    main_layout = QHBoxLayout()
    webcam_widget = WebcamWidget()
    main_layout.addWidget(webcam_widget)
    window.setLayout(main_layout)

    window.show()
    sys.exit(app.exec_())






