import cv2
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
import vgg

# Module-level cache: parse the cascade XML once, not on every frame.
_FACE_CASCADE = None


def detect_faces(frame):
    """Detect faces in a BGR frame.

    Args:
        frame: BGR image (numpy array) as returned by ``cv2.VideoCapture.read``.

    Returns:
        Sequence of ``(x, y, w, h)`` bounding boxes, one per detected face
        (empty when no face is found).

    Raises:
        IOError: if the bundled Haar cascade file cannot be loaded.
    """
    global _FACE_CASCADE
    if _FACE_CASCADE is None:
        # Use the cascade shipped with the installed OpenCV package instead of
        # a machine-specific absolute path, so the script is portable.
        cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml"
        cascade = cv2.CascadeClassifier(cascade_path)
        # CascadeClassifier fails silently on a bad path; check explicitly.
        if cascade.empty():
            raise IOError("Failed to load Haar cascade: " + cascade_path)
        _FACE_CASCADE = cascade
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return _FACE_CASCADE.detectMultiScale(
        gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )

def load_model(weights_path="C:/Users/王昊宸/Downloads/face_recognition-master/face_reco/pkl/face_bk_cpu.pkl"):
    """Build the small_VGG16 network and load trained weights for inference.

    Args:
        weights_path: path to the saved ``state_dict``. The default keeps the
            original hard-coded location for backward compatibility; pass a
            path explicitly on other machines.

    Returns:
        The model with weights loaded, switched to eval mode.
    """
    model = vgg.VGG('small_VGG16')
    # map_location="cpu" ensures this CPU-trained checkpoint loads regardless
    # of whether CUDA is available on the current machine.
    model.load_state_dict(torch.load(weights_path, map_location="cpu"))
    model.eval()
    return model

# The preprocessing pipeline is constant, so build it once at import time
# instead of once per detected face inside the loop.
_PREPROCESS = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])


def recognize_faces(frame, faces, model):
    """Classify each detected face crop with *model*.

    Args:
        frame: full BGR frame the faces were detected in.
        faces: iterable of ``(x, y, w, h)`` bounding boxes.
        model: torch module in eval mode; assumed to map a (1, 3, 128, 128)
            float tensor to class logits — TODO confirm against vgg.VGG.

    Returns:
        List of predicted class indices as strings, one per face.
    """
    identities = []
    # One no_grad context around the whole loop instead of one per face.
    with torch.no_grad():
        for (x, y, w, h) in faces:
            face_img = frame[y:y + h, x:x + w]  # crop the detected face
            pil_image = Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB))
            tensor_img = _PREPROCESS(pil_image).unsqueeze(0)  # add batch dimension
            output = model(tensor_img)
            _, predicted = torch.max(output, 1)
            # .item() yields the plain class index (e.g. "3"); the original
            # str(predicted) produced the tensor repr "tensor([3])", which was
            # then drawn on screen verbatim.
            identities.append(str(predicted.item()))
    return identities

def draw_faces(frame, faces, identities):
    """Annotate *frame* in place: a green box and identity label per face."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    color = (0, 255, 0)
    for box, label in zip(faces, identities):
        x, y, w, h = box
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
        # Place the label just above the top-left corner of the box.
        cv2.putText(frame, label, (x, y - 10), font, 0.9, color, 2)

def capture_video():
    """Run the webcam loop: detect, recognize, and annotate faces until 'q'.

    Opens camera 0 and processes frames one at a time. The capture device and
    OpenCV windows are always released, even if a frame handler raises.

    Raises:
        RuntimeError: if the camera cannot be opened.
    """
    camera_idx = 0
    cap = cv2.VideoCapture(camera_idx)
    # Fail fast on a missing/busy camera instead of silently looping on
    # failed reads.
    if not cap.isOpened():
        raise RuntimeError("Cannot open camera %d" % camera_idx)

    model = load_model()

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            faces = detect_faces(frame)
            identities = recognize_faces(frame, faces, model)

            draw_faces(frame, faces, identities)

            cv2.imshow("Video", frame)

            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # Guarantee the camera is freed even on an exception mid-loop;
        # otherwise the device stays locked until the process exits.
        cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    # Entry point: run the camera loop only when executed as a script,
    # not when imported as a module.
    capture_video()
