import torch
from PIL import Image
import torchvision.transforms as transforms
import torch.nn.functional as F
from facenet_pytorch import MTCNN, InceptionResnetV1

# Select GPU if available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# MTCNN detects a face and returns a cropped face tensor.
# NOTE: when no face is found it returns None -- it does NOT raise.
mtcnn = MTCNN(device=device)

# Pretrained FaceNet (InceptionResnetV1, VGGFace2 weights), eval mode,
# used purely for feature extraction.
resnet = InceptionResnetV1(pretrained='vggface2').eval().to(device)

# Load the two images. Convert to RGB explicitly: PNG files are often
# RGBA or grayscale, while MTCNN expects 3-channel RGB input.
img_paths = ["baseface/yangyu1.png", "baseface/bigbeauty.png"]
imgs = [Image.open(img_path).convert('RGB') for img_path in img_paths]

# Detect and crop a face from each image.
cropped_imgs = []
for img_path, img in zip(img_paths, imgs):
    try:
        cropped_img = mtcnn(img)
    except Exception as e:
        print(f"Error during face detection and cropping: {e}")
        continue
    # mtcnn() signals "no face found" by returning None rather than
    # raising; a None element would make torch.stack below fail with
    # an unhelpful error, so filter it out here with a clear message.
    if cropped_img is None:
        print(f"No face detected in {img_path}")
    else:
        cropped_imgs.append(cropped_img)

# A pairwise similarity needs exactly two face crops; fail loudly
# instead of crashing later on torch.stack / embeddings[1].
if len(cropped_imgs) != 2:
    raise RuntimeError(
        f"Expected 2 detected faces, got {len(cropped_imgs)}; cannot compare."
    )

# Batch the two face crops and move them to the compute device.
batch = torch.stack(cropped_imgs).to(device)

# Extract embeddings without gradient tracking to save memory and time.
with torch.no_grad():
    embeddings = resnet(batch)

# Cosine similarity between the two face embedding vectors.
similarity = F.cosine_similarity(embeddings[0].unsqueeze(0), embeddings[1].unsqueeze(0))

# Report the similarity score.
print("Cosine similarity between the two images:", similarity.item())