import torch
import cv2
import numpy as np
from PIL import Image
import torch.nn.functional as F
from facenet_pytorch import MTCNN, InceptionResnetV1
import os
import time

# Select GPU 0 if CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# MTCNN model used to detect and crop faces from images.
mtcnn = MTCNN(device=device)

# Pretrained FaceNet model (InceptionResnetV1, VGGFace2 weights) used to
# compute face embeddings; eval mode since we only run inference.
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)

# Load the reference images from the "baseface" directory.
img_paths_collection = [os.path.join("baseface", name) for name in os.listdir("baseface")]

imgs_collection = [Image.open(img_path) for img_path in img_paths_collection]

# Detect and crop a face from every reference image. MTCNN returns None when
# no face is found; keep paths and crops paired so that an embedding's index
# always maps back to the correct file path (the original code kept every
# path even when the crop failed, desynchronizing the two lists).
cropped_imgs_collection = []
kept_img_paths = []
for img_path, img in zip(img_paths_collection, imgs_collection):
    try:
        cropped_img = mtcnn(img)
    except Exception as e:
        print(f"Error during face detection and cropping: {e}")
        continue
    if cropped_img is None:
        print(f"No face detected in {img_path}; skipping.")
        continue
    cropped_imgs_collection.append(cropped_img)
    kept_img_paths.append(img_path)

# Only paths that produced a usable crop remain, aligned with the batch below.
img_paths_collection = kept_img_paths

if not cropped_imgs_collection:
    raise RuntimeError("No faces detected in any reference image under 'baseface'.")

# Stack the cropped faces into a single batch tensor on the target device.
batch_collection = torch.stack(cropped_imgs_collection).to(device)

# Extract reference embeddings without gradients to save memory and time.
with torch.no_grad():
    embeddings_collection = resnet(batch_collection)

# Open the video source (replace with your local video file path).
video_path = "video/video1.mp4"
cap = cv2.VideoCapture(video_path)

# Ensure the output folder for matched faces exists (exist_ok avoids the
# check-then-create race of the exists()/makedirs() pair).
os.makedirs('success', exist_ok=True)

# Run face matching only once every `frame_interval` frames (every 30 frames,
# i.e. roughly once per second at 30 fps) to save compute.
frame_interval = 30
last_processed_frame_count = 0
try:
    while True:
        # Read the next frame; stop at end of stream or read failure.
        ret, frame = cap.read()
        if not ret:
            break

        # Display the raw stream at ~30 fps; press 'q' to quit.
        cv2.imshow('Video', frame)
        wait_time = int(1000 / 30)
        if cv2.waitKey(wait_time) & 0xFF == ord('q'):
            break

        # Only run detection/matching once every `frame_interval` frames.
        last_processed_frame_count += 1
        if last_processed_frame_count < frame_interval:
            continue
        last_processed_frame_count = 0

        # OpenCV delivers BGR; MTCNN/PIL expect RGB.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_pil = Image.fromarray(frame_rgb)

        # Detect and crop the face in the current frame.
        try:
            cropped_frame = mtcnn(frame_pil)
        except Exception as e:
            print(f"Error during face detection and cropping: {e}")
            continue

        if cropped_frame is None:
            continue

        # Embed the detected face (no gradients needed at inference time).
        with torch.no_grad():
            frame_embedding = resnet(cropped_frame.unsqueeze(0).to(device))

        # Cosine similarity of the frame embedding against every reference
        # embedding: (1, D) broadcasts against (N, D) along dim=1 → (N,).
        similarities = F.cosine_similarity(frame_embedding, embeddings_collection)

        # Best-matching reference image.
        max_similarity, max_index = similarities.max(dim=0)
        best_path = img_paths_collection[max_index.item()]

        print(f"img :{best_path}")
        print(f"The highest cosine similarity is: {max_similarity.item()}")

        # Accept the match when similarity exceeds 0.7.
        if max_similarity.item() > 0.7:
            # Convert the cropped face tensor (C, H, W) to a NumPy image (H, W, C).
            cropped_frame_np = cropped_frame.permute(1, 2, 0).cpu().numpy()

            # MTCNN's default post-processing standardizes pixels to roughly
            # [-1, 1] via (x - 127.5) / 128, so invert that mapping here —
            # the original `x * 255` assumed a [0, 1] range and produced a
            # washed-out/clipped image.
            cropped_frame_np = np.clip(cropped_frame_np * 128.0 + 127.5, 0, 255).astype(np.uint8)

            # One BGR conversion reused for both saving and display.
            cropped_frame_bgr = cv2.cvtColor(cropped_frame_np, cv2.COLOR_RGB2BGR)

            # Save as success/<reference-name>_<timestamp>_<score>.jpg.
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            base_name = os.path.basename(best_path).split('.')[0]
            save_path = os.path.join(
                'success', f"{base_name}_{timestamp}_{max_similarity.item():.2f}.jpg")
            cv2.imwrite(save_path, cropped_frame_bgr)
            print(f"Saved image to {save_path}")

            # Show the cropped face that was matched.
            cv2.imshow('Cropped Face', cropped_frame_bgr)

finally:
    # Release the capture device and close all OpenCV windows.
    cap.release()
    cv2.destroyAllWindows()
