# linux python 3.10

# https://github.com/deepinsight/insightface
# https://github.com/serengil/deepface

# pip install insightface -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install onnxruntime -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install chromadb -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install gradio -i https://pypi.tuna.tsinghua.edu.cn/simple/

# download_path: /root/.insightface/models/buffalo_l
# Downloading /root/.insightface/models/buffalo_l.zip from https://github.com/deepinsight/insightface/releases/download/v0.7/buffalo_l.zip...

# pip install deepface -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install tf-keras -i https://pypi.tuna.tsinghua.edu.cn/simple/

# 导入必要的库
import os
import time
import chromadb
import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import tempfile
import shutil

# NOTE(review): hard-coded project root; the header comment says "linux" but
# this is a macOS-style path — confirm per deployment environment.
cwd = "/Users/hhwang/code/gitee/haihwang/langgraph-examples/face-recognize"

# Working sub-directories derived from the project root.
src_image_path = os.path.join(cwd, "src")  # source ID photos of the known persons
tmp_save_dir = os.path.join(cwd, "tmp")    # scratch dir for cropped faces and uploads

# Person record type (a plain dict subclass, no extra behavior).
# NOTE(review): image_set below uses plain dict literals, so this class is
# unused in the visible code — confirm before removing.
class Person(dict):
    """Marker type for a person record; behaves exactly like ``dict``."""
    pass

# Enrollment dataset: one record per known face image.
#   Path — absolute path to the ID photo (file name is the ID-card number)
#   Name — person's display name (stored as ChromaDB metadata)
#   ID   — unique string id used as the ChromaDB document id
image_set = [
    {"Path": f"{src_image_path}/610111200909022514.jpg", "Name": "刘淏心", "ID": "0"},
    {"Path": f"{src_image_path}/61011620090417764X.jpg", "Name": "夏海蓉", "ID": "1"},
    {"Path": f"{src_image_path}/610116200912214244.jpg", "Name": "武爱玉", "ID": "2"},
    {"Path": f"{src_image_path}/610116201001204242.jpg", "Name": "武爱玉", "ID": "3"},
    {"Path": f"{src_image_path}/610631200811281027.jpg", "Name": "顾如意", "ID": "4"},
    {"Path": f"{src_image_path}/610929200909043224.jpg", "Name": "周雲梦", "ID": "5"},
]

# 武爱玉  610116200912214244  康复技术2501班
# 武爱玉  610116201001204242  康复技术2501班
# 刘淏心  610111200909022514  康复技术2501班
# 夏海蓉  61011620090417764X  康复技术2501班
# 周雲梦  610929200909043224  康复技术2501班
# 顾如意  610631200811281027  康复技术2501班

from deepface import DeepFace

# 初始化人脸分析模型, ctx_id=-1 表示使用CPU, 0表示使用GPU
import cv2
# from insightface.app import FaceAnalysis
# import insightface
# app = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
# app.prepare(ctx_id=-1)

# Initialize the ChromaDB client and collection; "cosine" selects
# cosine-distance for the HNSW index.
db_path = f"{cwd}/face_db"

def _reset_dir(path):
    """Delete *path* (if it exists) and recreate it empty."""
    if os.path.exists(path):
        shutil.rmtree(path)
        print(f"已删除目录: {path}")
    os.makedirs(path, exist_ok=True)

# Both the vector store and the scratch dir are rebuilt from scratch on every
# start (the same delete-then-recreate sequence was previously duplicated).
_reset_dir(db_path)
_reset_dir(tmp_save_dir)

client = chromadb.PersistentClient(path=db_path)
collection = client.get_or_create_collection(
    name="face_embeddings",
    metadata={"hnsw:space": "cosine"}
)

# def get_face_embedding1(image_path):
#     """从图像中提取人脸嵌入向量"""
#     print(f"正在处理图像: {image_path}")
#     start_time = time.time()
#     img = cv2.imread(image_path)
#     if img is None:
#         raise ValueError(f"无法读取图像: {image_path}")
    
#     print("使用InsightFace进行人脸识别...")
#     faces = app.get(img)
#     end_time = time.time()
#     print(f"embedding execution time: {end_time - start_time:.2f} seconds")
#     if len(faces) < 1:
#         raise ValueError("图像中未检测到人脸")
#     if len(faces) > 1:
#         print("警告: 检测到多个人脸，使用第一个检测到的人脸")
    
#     emb = faces[0].embedding.tolist()
#     return emb

# Face recognition model names accepted by DeepFace.represent(model_name=...).
recognition_models = [
    "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", 
    "ArcFace", "Dlib", "SFace", "GhostFaceNet", "Buffalo_L"
]

# Input normalization modes for DeepFace (normalization=... parameter).
normalize_models = [
   "base", "raw", "Facenet", "Facenet2018", "VGGFace", "VGGFace2", "ArcFace"
]

# Face detector backends for DeepFace (detector_backend=... parameter).
backend_models = [
    "opencv", "retinaface", "mtcnn", "ssd", "dlib", "mediapipe", 
    "yolov8", "yolov11n", "yolov11s", "yolov11m", "centerface"
]

def get_face_embedding2(opt: str, image_path, stu_name):
    """Extract a face embedding vector from an image via DeepFace.

    Parameters:
        opt: pipeline mode — "load" (enrollment: mediapipe detector) or
             "search" (query: opencv first pass, then a second pass with
             mediapipe on the cropped best face).
        image_path: path of the image to process.
        stu_name: person's name, used only for log messages.

    Returns:
        list: the embedding vector of the best-scoring detected face.

    Raises:
        ValueError: if no face is detected (DeepFace may also raise its own
            error because enforce_detection=True).
    """
    # Facenet512 model with Facenet normalization (see the option lists above).
    face_mode = recognition_models[2]
    normalizer = normalize_models[2]

    # Detector backend depends on the pipeline stage.
    # (The original code assigned a default and then unconditionally
    # overwrote it; the dead assignments are removed.)
    if opt == "load":
        detector = backend_models[5]   # mediapipe
    else:
        detector = backend_models[0]   # opencv ("search" and any other value)

    start_time = time.time()
    faces = DeepFace.represent(
        img_path = image_path,
        model_name = face_mode,
        align=True,
        normalization=normalizer,
        detector_backend=detector,
        enforce_detection=True,
    )

    if len(faces) < 1:
        raise ValueError(f"{stu_name}: 图像中未检测到人脸")

    # Pick the best face by the combined size/position/confidence score.
    best_face, best_score, best_index, best_image = get_best_face(image_path, faces, stu_name)
    print(f"{stu_name}: 共检测到{len(faces)}人脸, 选择了第{best_index+1}个人脸, 得分: {best_score:.2f}")

    if opt == "search":
        # Second pass: re-embed the cropped best face with the mediapipe
        # detector for tighter alignment on the query image.
        print(f"{stu_name}: 二次提取人脸图片{best_image}")
        detector = backend_models[5]
        faces = DeepFace.represent(
            img_path = best_image,
            model_name = face_mode,
            align=True,
            normalization=normalizer,
            detector_backend=detector,
            enforce_detection=True,
        )
        best_face, best_score, best_index, best_image = get_best_face(best_image, faces, stu_name)
        print(f"{stu_name}: 二次检测到{len(faces)}人脸, 选择了第{best_index+1}个人脸, 得分: {best_score:.2f}")

    # Bug fix: previously end_time was captured before the second pass, so the
    # printed execution time excluded it. Measure the total elapsed time here.
    end_time = time.time()

    emb = best_face["embedding"]
    print(f"{stu_name}: 使用DeepFace: {face_mode}, embedding: {len(emb)} execution time: {end_time - start_time:.2f} seconds")
    return emb

def save_scan_faces_to_image(image_path, faces):
    """Crop every detected face out of *image_path* and save each crop as
    face_<index>.jpg inside the scratch directory."""
    img = cv2.imread(image_path)

    for index, face_info in enumerate(faces):
        # Bounding box reported by the detector: x/y top-left corner, w/h size.
        area = face_info["facial_area"]
        x, y, w, h = area["x"], area["y"], area["w"], area["h"]

        # Slice the face region out of the full image and persist it.
        crop = img[y:y + h, x:x + w]
        output_path = os.path.join(tmp_save_dir, f"face_{index}.jpg")
        cv2.imwrite(output_path, crop)
        print(f"第{index+1}个人脸区域已保存到: {output_path}")

def save_single_face_to_image(img, face, index, file_name_without_ext, score, stu_name):
    """Crop one detected face region from *img*, save it to the scratch dir
    as <stem>_<index>_<score>.jpg, and return the saved file path."""
    # Bounding box of this face: top-left corner plus width/height.
    area = face["facial_area"]
    x, y, w, h = area["x"], area["y"], area["w"], area["h"]

    # Extract the face region from the full image.
    crop = img[y:y + h, x:x + w]

    # Persist the crop; the score is embedded in the file name for debugging.
    face_img_file = f"{file_name_without_ext}_{index}_{score:.2f}.jpg"
    output_path = os.path.join(tmp_save_dir, face_img_file)
    cv2.imwrite(output_path, crop)
    print(f"{stu_name}: 第{index+1}个人脸区域已保存 {face_img_file}")
    return output_path

def get_best_face(image_path, faces, stu_name):
    """Score each detected face and return the best one.

    Returns a tuple (face, score, index, crop_path) where crop_path is the
    saved image of the winning face region.

    Raises:
        ValueError: when *image_path* cannot be read.
    """
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"{stu_name} 无法读取图像: {image_path}")
    img_height, img_width = img.shape[:2]

    # File-name stem used when saving the per-face crops.
    stem = os.path.splitext(os.path.basename(image_path))[0]

    best = (None, -1, -1, None)  # (face, score, index, crop image path)

    for index, face in enumerate(faces):
        # Combined quality score (size / centrality / confidence).
        score = calculate_face_score(face, img_width, img_height)
        print(f"{stu_name}: 人脸{index+1}评分: {score:.2f}, 检测置信度: {face['face_confidence']}")

        # Every candidate crop is saved for debugging, not just the winner.
        crop_path = save_single_face_to_image(img, face, index, stem, score, stu_name)

        if score > best[1]:
            best = (face, score, index, crop_path)

    return best

def calculate_face_score(face, img_width, img_height):
    """Combined quality score for one detected face, clamped to [0, 1].

    Weighting: size 40%, centrality 30%, confidence 20%, minus a flat 0.1
    penalty when the bounding box crosses the image border.
    """
    box = face['facial_area']
    x, y, w, h = box['x'], box['y'], box['w'], box['h']

    # 1. Size (40%): fraction of the image covered, saturating at 10% coverage.
    size_score = min(w * h / (img_width * img_height) * 10, 1.0)

    # 2. Centrality (30%): 1 at the exact image centre, 0 at the far corner.
    dx = (x + w/2) - img_width/2
    dy = (y + h/2) - img_height/2
    max_distance = ((img_width/2)**2 + (img_height/2)**2)**0.5
    position_score = 1 - ((dx**2 + dy**2)**0.5) / max_distance

    # 3. Confidence (20%): detector confidence when available, otherwise a
    #    proxy based on how close the aspect ratio is to a typical face (~0.75).
    if 'face_confidence' in face:
        confidence_score = face['face_confidence']
    else:
        aspect_ratio = w / h
        confidence_score = 1 - min(abs(aspect_ratio - 0.75), 0.3) / 0.3

    # 4. Border penalty: subtract 0.1 if the box lies partly outside the image.
    in_bounds = x >= 0 and y >= 0 and x + w <= img_width and y + h <= img_height
    penalty = 0 if in_bounds else 0.1

    # Same left-to-right accumulation order as a running "+=" sum.
    score = size_score * 0.4 + position_score * 0.3 + confidence_score * 0.2 - penalty
    return max(0, min(score, 1.0))

def get_face_embedding(opt, image_path, stu_name):
    """Dispatch to the active embedding backend (currently the DeepFace
    implementation; the InsightFace variant is disabled above)."""
    return get_face_embedding2(opt, image_path, stu_name)

def load_face_embeddings_to_db():
    """Enroll every person in ``image_set``: extract an embedding and add it
    to the ChromaDB collection.

    Failures for individual images are logged and collected into a list that
    is printed at the end; the function never raises, so one bad photo does
    not abort the whole enrollment.
    """
    detect_failed_images = []

    for person in image_set:
        print("*" * 50)
        # Bug fix: read the record fields before the try block so the except
        # handler can always reference them (previously stu_name/stu_image
        # could be unbound on a first-iteration failure, or stale afterwards).
        stu_name = person["Name"]
        stu_image = person["Path"]
        try:
            embedding = get_face_embedding("load", stu_image, stu_name)
            collection.add(
                ids=[person["ID"]],
                embeddings=[embedding],
                metadatas=[{"name": stu_name}]
            )
            print(f"{stu_name}: 已添加 {person['ID']} 到数据库")
        except Exception as e:
            detect_failed_images.append(f"{stu_name}_{stu_image}")
            print(f"添加 {person['Name']} 到数据库失败: {str(e)}")

    print(f"初始化加载失败的人脸数据 {detect_failed_images}")

def create_result_visualization(query_image_path, results):
    """Build a horizontal strip image showing the matched persons' photos.

    Parameters:
        query_image_path: path of the query image. It is loaded and resized
            but currently NOT pasted into the canvas (the paste was disabled).
        results: ChromaDB query result dict with "ids"/"distances"/"metadatas".

    Returns:
        PIL.Image.Image: a white canvas with one 300x300 slot per match,
        each filled with the matched person's enrollment photo in rank order.
    """
    # Load the query image (also validates the path is readable).
    query_img = Image.open(query_image_path)
    query_img = query_img.resize((300, 300))

    ids = results["ids"][0]
    distances = results["distances"][0]
    metadatas = results["metadatas"][0]

    # One 300px-wide slot per returned match.
    result_width = 300 * len(ids)
    result_height = 400
    result_img = Image.new('RGB', (result_width, result_height), color=(255, 255, 255))

    draw = ImageDraw.Draw(result_img)
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    # ImageFont.truetype raises OSError when the font file cannot be found.
    try:
        font = ImageFont.truetype("simhei.ttf", 20)  # Chinese-capable font
    except OSError:
        font = ImageFont.load_default()

    draw.text((10, 10), "查询图像", fill=(0, 0, 0), font=font)

    # Paste each matched person's enrollment photo into its slot.
    for i, (face_id, distance, metadata) in enumerate(zip(ids, distances, metadatas)):
        # Map the ChromaDB id back to the enrollment image path.
        match_path = None
        for person in image_set:
            if person["ID"] == face_id:
                match_path = person["Path"]
                break

        if match_path and os.path.exists(match_path):
            match_img = Image.open(match_path)
            match_img = match_img.resize((300, 300))
            result_img.paste(match_img, (i * 300, 50))

    return result_img

def search_face(input_image, top_k=3, threshold=0.35):
    """
    Search the vector database for faces similar to the one in *input_image*.

    Parameters:
        input_image: query image (PIL.Image or a file path string)
        top_k: number of nearest matches to request from ChromaDB
        threshold: similarity threshold — NOTE(review): currently UNUSED;
            all top_k results are returned regardless of similarity.
            Confirm whether filtering was intended.

    Returns:
        (match_info, result_image): a human-readable result string and a PIL
        visualization image; on failure an error string and None.
    """
    # Persist an uploaded PIL image to a temp file, because the embedding
    # pipeline operates on file paths. A string input is used as-is.
    if isinstance(input_image, str):
        temp_path = input_image
    else:
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg", dir=tmp_save_dir)
        temp_path = temp_file.name
        input_image.save(temp_path)
        
    print(f"save to temp_path={temp_path}")
    
    try:
        # Embed the query face ("search" mode runs a two-pass extraction).
        query_embedding = get_face_embedding("search", temp_path, "compare")
        
        # Nearest-neighbour lookup (cosine space per the collection metadata).
        results = collection.query(
            query_embeddings=[query_embedding],
            n_results=top_k
        )
        
        # Assemble the matches; bail out early when nothing was returned.
        matches = []
        if not results["ids"][0]:
            return "未找到相似人脸", None

        # Build the side-by-side visualization of the matched photos.
        result_image = create_result_visualization(temp_path, results)

        ids = results["ids"][0]
        distances = results["distances"][0]
        metadatas = results["metadatas"][0]

        # similarity = 1 - cosine distance (may be negative for distances > 1).
        for i, (face_id, distance, metadata) in enumerate(zip(ids, distances, metadatas)):
            similarity = 1 - distance
            matches.append({
                "id": face_id,
                "name": metadata["name"],
                "similarity": similarity,
                "distance": distance
            })
            print(f"找到相似人脸 - ID: {face_id}, 姓名: {metadata['name']}, 相似度: {similarity:.4f}")

        # One line per match: name, id, similarity as a percentage.
        match_info = "\n".join([f"姓名: {m['name']}, {m['id']}, 相似度: {m['similarity']*100:.0f}%" for m in matches])

        return match_info, result_image

    except Exception as e:
        print(f"处理图像时发生错误: {str(e)}")
        return f"错误: {str(e)}", None
    
    finally:
        # Delete the temp file only when we created it from a PIL upload;
        # caller-supplied path strings are left untouched.
        if isinstance(input_image, Image.Image) and os.path.exists(temp_path):
            os.unlink(temp_path)

def process_image(input_image):
    """Gradio callback: run the similarity search and pass its results through.

    Returns the (match_info, result_image) pair produced by search_face.
    """
    print("#" * 50)
    return search_face(input_image, top_k=3)

# Build the Gradio web UI: an image upload, a trigger button, and two
# output widgets (text result + visualization image).
with gr.Blocks(title="人脸识别系统") as demo:
    gr.Markdown("# 人脸识别系统")
    gr.Markdown("上传一张包含人脸的图片，系统将识别最相似的人脸")
    
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(
                label="上传图片", 
                type="pil", 
                sources="upload",
                width=300,  # display width in pixels
                height=300  # display height in pixels
            )
            submit_btn = gr.Button("开始识别")
            result_text = gr.Textbox(label="识别结果", lines=3, max_lines=10)
            result_image = gr.Image(label="可视化结果", interactive=False)
    
    # Wire the button to the recognition pipeline.
    submit_btn.click(
        fn=process_image,
        inputs=[input_image],
        outputs=[result_text, result_image],
    )
    
    gr.Markdown("## 使用说明")
    gr.Markdown("1. 上传一张清晰的人脸图片")
    gr.Markdown("2. 点击'开始识别'按钮")
    gr.Markdown("3. 系统将显示最相似的人脸及相似度")

# Application entry point.
if __name__ == "__main__":
    # Enroll faces only when the persistent collection is empty.
    # NOTE(review): the face_db directory is deleted and recreated at import
    # time above, so count is expected to be 0 on every start and enrollment
    # always runs — confirm the wipe-on-start is intended.
    count = collection.count()
    if count > 0:
        print(f"已加载现有数据库，包含 {count} 条人脸记录")
    else:
        print("创建新数据库并初始化人脸数据...")
        load_face_embeddings_to_db()
    
    # Listen on all interfaces, port 9022; share=True also opens a public tunnel.
    demo.launch(share=True, server_name="0.0.0.0", server_port=9022)
