import os
from pathlib import Path

import face_recognition
import gradio as gr
from PIL import Image, ImageDraw

IMG_PATH = "./known"  # directory holding enrolled (known) face images
IMG_PATH_Test = "./unknown"  # directory holding test images to recognize


def str_intercept(img_path):
    """Locate the last ``.`` and last ``/`` in *img_path*.

    The original implementation scanned a reversed copy of the string;
    ``str.rfind`` does the same job directly.

    Args:
        img_path: a path-like string, e.g. ``"./known/alice.jpg"``.

    Returns:
        Tuple ``(point_index, slash_index)`` — the index of the last dot
        (extension separator) and the last slash (directory separator).
        When a character is absent, ``len(img_path) - 1`` is returned for
        it, matching the historical fallback of the reversed-scan version.
    """
    missing = len(img_path) - 1  # legacy fallback value for "not found"

    point_index = img_path.rfind(".")
    if point_index < 0:  # rfind reports -1; callers expect len-1
        point_index = missing

    slash_index = img_path.rfind("/")
    if slash_index < 0:
        slash_index = missing

    return point_index, slash_index


# Face enrollment: rename the uploaded image after the person, then reload it.
def face_entry(img_path, name_text):
    """Rename the uploaded face image to ``<dir>/<name_text><ext>``.

    Args:
        img_path: filesystem path of the uploaded image (gr.Image filepath).
        name_text: person's name typed by the user; becomes the new file stem.

    Returns:
        Tuple ``(PIL image, renamed path, name)`` — shown in the UI and kept
        in hidden state for the later recognition step.
    """
    # os.path.splitext is extension-aware: a dot inside a directory name
    # (e.g. "./known/file") is not mistaken for an extension separator.
    ext = os.path.splitext(img_path)[1]
    # Keep everything through the final '/' (empty when there is none,
    # which renames into the current directory).
    directory = img_path[: img_path.rfind("/") + 1]
    img_rename_path = f"{directory}{name_text}{ext}"

    os.rename(img_path, img_rename_path)
    print(img_rename_path)  # debug trace of the enrolled file

    return Image.open(img_rename_path), img_rename_path, name_text


# Populate an input component from a clicked Dataset example.
def set_example_image(example: list):
    """Return an update that loads the selected sample into a gr.Image."""
    selected_path = example[0]
    return gr.Image.update(value=selected_path)


def face_recognition_(img_srcPath, img_tagPath, img_personName):
    """Match faces in the test image against the single enrolled face.

    Args:
        img_srcPath: path of the enrolled (known) image.
        img_tagPath: path of the test image to scan.
        img_personName: label drawn next to a matching face.

    Returns:
        A PIL image with a box drawn around every detected face; matches
        are labelled *img_personName*, others "Unknown Person".

    Raises:
        ValueError: if no face can be detected in the enrolled image
            (the original code crashed here with a bare IndexError).
    """
    image_of_person = face_recognition.load_image_file(img_srcPath)
    src_encodings = face_recognition.face_encodings(image_of_person)
    if not src_encodings:
        raise ValueError(f"no face detected in source image: {img_srcPath}")

    known_face_encodings = [src_encodings[0]]
    known_face_names = [img_personName]

    test_image = face_recognition.load_image_file(img_tagPath)
    face_locations = face_recognition.face_locations(test_image)
    face_encodings = face_recognition.face_encodings(test_image, face_locations)

    pil_image = Image.fromarray(test_image)
    draw = ImageDraw.Draw(pil_image)

    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)

        name = "Unknown Person"
        if True in matches:
            name = known_face_names[matches.index(True)]

        # Outline the detected face.
        draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 0))

        # ImageDraw.textsize was removed in Pillow 10; prefer textbbox.
        if hasattr(draw, "textbbox"):
            _l, _t, _r, _b = draw.textbbox((0, 0), name)
            text_height = _b - _t
        else:
            text_height = draw.textsize(name)[1]

        # Black label strip along the bottom edge, with the name in white.
        draw.rectangle(((left, bottom - text_height - 10), (right, bottom)),
                       fill=(0, 0, 0), outline=(0, 0, 0))
        draw.text((left + 6, bottom - text_height - 5), name, fill=(255, 255, 255, 255))

    del draw
    return pil_image


with gr.Blocks() as demo:
    # --- Face enrollment section ---
    with gr.Row():
        with gr.Column():
            with gr.Row():
                input_img = gr.Image(image_mode="RGB", source="upload", type="filepath", label="人脸录入")
            with gr.Row():
                input_name = gr.Textbox(label="姓名")
            with gr.Row():
                btn = gr.Button(label="录入")
            with gr.Row():
                # Example gallery built from the known-faces directory.
                paths = sorted(Path(IMG_PATH).rglob('*.jpg'))
                example_images = gr.Dataset(components=[input_img], samples=[[path.as_posix()] for path in paths])

        with gr.Column():
            with gr.Row():
                output_ = gr.Image(image_mode="RGB", source="upload", type="pil", label="录入图片")
                # Hidden state carrying the enrolled image path and name
                # from face_entry into face_recognition_.
                input_srcImg = gr.Variable(value="")
                input_srcName = gr.Variable(value="")

    # --- Face recognition (test) section ---
    with gr.Row():
        with gr.Column():
            with gr.Row():
                input_img_test = gr.Image(image_mode="RGB", source="upload", type="filepath", label="测试图片")
            with gr.Row():
                btn_test = gr.Button(label="测试")
            with gr.Row():
                paths = sorted(Path(IMG_PATH_Test).rglob('*.jpg'))
                # BUGFIX: this gallery must be bound to the *test* image
                # input, not the enrollment input (was components=[input_img]).
                example_images_test = gr.Dataset(components=[input_img_test], samples=[[path.as_posix()] for path in paths])

        with gr.Column():
            with gr.Row():
                output_test = gr.Image(image_mode="RGB", source="upload", type="pil", label="识别图片")

    # --- Event wiring ---
    btn.click(fn=face_entry, inputs=[input_img, input_name], outputs=[output_, input_srcImg, input_srcName])

    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[input_img])

    btn_test.click(fn=face_recognition_, inputs=[input_srcImg, input_img_test, input_srcName], outputs=[output_test])
    example_images_test.click(fn=set_example_image, inputs=[example_images_test], outputs=[input_img_test])

    demo.launch(inbrowser=True)
