import face_recognition
import cv2
from PIL import Image
import numpy as np
import os

# Match strictness is controlled via the `tolerance` argument of
# face_recognition.compare_faces. The library default is 0.6; lower values are
# stricter/more accurate. Too low and genuine faces fail to match; too high and
# different people get confused with each other.
TOLERANCE_INDEX = 0.39
# For more accurate encodings, face_encodings accepts `num_jitters`: how many
# times the face is re-sampled when computing the encoding (higher = more
# accurate but slower; 100 is ~100x slower).
# NOTE(review): upstream face_recognition documents the default as 1, not 0.
NUM_JITTERS_INDEX = 100

class nestDict(dict):
    """Dict subclass whose missing keys auto-create nested nestDict values.

    Reading an absent key stores and returns a fresh empty nestDict, so
    arbitrarily deep assignment works without pre-creating levels:

        d = nestDict()
        d['hvac']['ventilation'] = 0.9
        d['hvac']['cooling'] = 0.8
        # d == {'hvac': {'ventilation': 0.9, 'cooling': 0.8}}
    """

    def __missing__(self, key):
        """Called by dict.__getitem__ when `key` is absent; without this
        override a plain KeyError would be raised.

        :param key: the missing dictionary key
        :return: the freshly created (and stored) child nestDict
        """
        child = type(self)()
        self[key] = child
        return child

def _matchKnownFace(know_people_coding_nest_dict, unknown_coding):
    """Return the first known person whose stored encoding matches, else None.

    :param know_people_coding_nest_dict: {person_name: {image_name: encoding}}
    :param unknown_coding: one 128-d face encoding from face_recognition
    :return: the matching person's name, or None if nobody matched
    """
    for name, know_people_coding_dict in know_people_coding_nest_dict.items():
        for coding in know_people_coding_dict.values():
            # compare_faces returns one boolean per known encoding passed in.
            if face_recognition.compare_faces([coding], unknown_coding, tolerance=TOLERANCE_INDEX)[0]:
                print(f'识别出:{name}')
                return name
    return None

def _saveFaceCrop(save_img_path, box, save_path_index):
    """Crop one detected face out of the source image file and save it as JPEG.

    :param save_img_path: path of the original image file on disk
    :param box: (left, top, right, bottom) crop rectangle (PIL order)
    :param save_path_index: 1-based counter used to build the output filename
    """
    save_path = save_img_path[0:save_img_path.rfind('.')] + str(save_path_index) + '.jpg'
    print(f"save_path:{save_path}")
    with Image.open(save_img_path) as save_img:
        if save_img.mode == "P":
            # Palette-mode images must be converted before saving as JPEG.
            save_img = save_img.convert('RGB')
        save_img.crop(box).save(save_path)

def dealDrawFace(img, save_face_detection=False, save_img_path='', trait_model='hog', known_img_model_file_path='', deal_pattern='image'):
    """Detect faces in `img`, draw boxes and landmarks, and label known people.

    The image is modified in place by the OpenCV drawing calls.

    :param img: BGR image array (as from cv2.imread / VideoCapture.read)
    :param save_face_detection: whether to crop and save each detected face
    :param save_img_path: path of the source image file (used to name crops)
    :param trait_model: face-location model: 'hog' (default) or 'cnn'
        (the CNN-based MMOD detector; needs a CUDA-enabled dlib build)
    :param known_img_model_file_path: path to the *.npy encodings database
        built by trainFaceRecognitionLibrary; empty string disables labelling
    :param deal_pattern: 'image' or 'video'; video frames are shrunk to 1/4
        size for speed, and coordinates are scaled back up when drawing
    :return: None
    :raises ValueError: if deal_pattern is neither 'image' nor 'video'
    """
    if deal_pattern == 'image':
        scale = 1
        num_jitters = NUM_JITTERS_INDEX
        img_rgb = img[:, :, ::-1]  # OpenCV is BGR; face_recognition wants RGB
    elif deal_pattern == 'video':
        scale = 4
        # Fewer re-sampling jitters so per-frame processing stays fast.
        # Fix: the original mutated the global NUM_JITTERS_INDEX here, which
        # silently degraded accuracy of later image/training calls.
        num_jitters = 10
        small_frame = cv2.resize(img, (0, 0), fx=0.25, fy=0.25)
        img_rgb = small_frame[:, :, ::-1]
    else:
        # Fix: the original fell through with img_rgb undefined (NameError).
        raise ValueError(f'unknown deal_pattern: {deal_pattern!r}')

    face_location = face_recognition.face_locations(img_rgb, model=trait_model)
    face_landmarks_list = face_recognition.face_landmarks(img_rgb)  # facial contour points

    print(f'face_landmarks_list;{face_landmarks_list}')

    know_people_coding_nest_dict = None
    unknown_coding_list = []
    if len(known_img_model_file_path) != 0:
        # Fix: load the database and compute ALL encodings once. The original
        # re-loaded the .npy file and re-encoded every face inside the per-face
        # loop (O(n^2) of very expensive work).
        know_people_coding_nest_dict = np.load(known_img_model_file_path, allow_pickle=True).item()
        unknown_coding_list = face_recognition.face_encodings(img_rgb, face_location, num_jitters=num_jitters)

    save_path_index = 1
    for i, (top, right, bottom, left) in enumerate(face_location):
        # Scale small-frame coordinates back up to the full-size video frame.
        top *= scale
        right *= scale
        bottom *= scale
        left *= scale

        cv2.rectangle(img, (left, top), (right, bottom), (0, 0, 255), 2)

        if know_people_coding_nest_dict is not None and i < len(unknown_coding_list):
            # Fix: match THIS face's encoding only. The original compared every
            # encoding in the image, labelling the current box with any match.
            name = _matchKnownFace(know_people_coding_nest_dict, unknown_coding_list[i])
            if name is not None:
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.rectangle(img, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.putText(img, name, (left + 6, bottom - 10), font, 1.0, (255, 255, 255), 1)

        if save_face_detection and len(save_img_path) != 0:  # save the cropped face
            _saveFaceCrop(save_img_path, (left, top, right, bottom), save_path_index)
            save_path_index += 1

    for face_landmarks in face_landmarks_list:  # draw facial landmark points
        for marks in face_landmarks.values():
            for p0, p1 in marks:
                cv2.circle(img, (p0 * scale, p1 * scale), 2, (0, 255, 0))

def faceDetectionDraw(img_path, known_img_model_file_path='', save_face_detection=False, deal_pattern='image'):
    """Draw face boxes on the image at `img_path`; optionally crop/save faces.

    :param img_path: path of the image to process
    :param known_img_model_file_path: known-face encodings database (*.npy);
        empty string disables name labelling
    :param save_face_detection: whether to save the cropped face images
    :param deal_pattern: passed through to dealDrawFace ('image' or 'video')
    :return: None (shows the annotated image in a window until a key press)
    :raises FileNotFoundError: if the image cannot be read
    """
    img = cv2.imread(img_path)
    if img is None:
        # Fix: cv2.imread returns None (no exception) for a missing/unreadable
        # file, which previously surfaced as an opaque error in dealDrawFace.
        raise FileNotFoundError(f'cannot read image: {img_path}')

    dealDrawFace(img, save_face_detection=save_face_detection, save_img_path=img_path, known_img_model_file_path=known_img_model_file_path, deal_pattern=deal_pattern)

    cv2.imshow('人脸检测的结果:', img)
    cv2.waitKey(0)  # block until a key is pressed
    cv2.destroyAllWindows()  # Fix: explicitly close the display window

def trainFaceRecognitionLibrary(img_dir, model_save_dir=''):
    """Build a known-face encodings database from a directory of images.

    Expected layout: img_dir/<person_name>/<image files>. Each image must
    contain exactly one face; other images are skipped with a message.

    :param img_dir: root folder containing one subfolder per person
    :param model_save_dir: folder to write train_face_labels.npy into;
        defaults to img_dir when empty
    :return: None (writes train_face_labels.npy to disk)
    """
    img_dir = img_dir.rstrip('/') or '/'

    know_people_coding_nest_dict = nestDict()
    for dir_name in os.listdir(img_dir):
        dir_path = os.path.join(img_dir, dir_name)
        print(f'开始处理人:{dir_name}')
        if not os.path.isdir(dir_path):
            continue

        for file_name in os.listdir(dir_path):
            # Only process common image formats.
            if os.path.splitext(file_name)[1].lower() not in [".jpg", ".png", ".jpeg", ".webp"]:
                continue
            image_path = os.path.join(dir_path, file_name)
            print(f'开始处理的人脸图片名:{file_name}')
            img = face_recognition.load_image_file(image_path)
            encoding_list = face_recognition.face_encodings(img, num_jitters=NUM_JITTERS_INDEX)

            # Exactly one face per training image, keyed by filename stem.
            if len(encoding_list) == 1:
                know_people_coding_nest_dict[dir_name][file_name.split('.')[0]] = encoding_list[0]
            else:
                print("{} 不适合训练：{}".format(file_name, "没有发现人脸" if len(encoding_list) == 0 else "发现超过多余一张的人脸"))

    # Fix: the original called np.save(model_save_dir) with no data argument
    # (TypeError) whenever model_save_dir was provided.
    save_dir = model_save_dir.rstrip('/') if model_save_dir else img_dir
    np.save(os.path.join(save_dir, 'train_face_labels.npy'), know_people_coding_nest_dict)

def faceDetectionComparison(known_img_model_file_path, comparison_img_path):
    """Identify which known people appear in an unknown image.

    :param known_img_model_file_path: path to the *.npy encodings database
        built by trainFaceRecognitionLibrary
    :param comparison_img_path: image file to search for known faces
    :return: de-duplicated list of matched names, or None for a non-image file
    """
    if os.path.splitext(comparison_img_path)[1].lower() not in [".jpg", ".png", ".jpeg", ".webp"]:
        print('请使用图片类型的文件进行人脸比对')
        return None

    know_people_coding_nest_dict = np.load(known_img_model_file_path, allow_pickle=True).item()

    unknown_img = face_recognition.load_image_file(comparison_img_path)
    unknown_coding_list = face_recognition.face_encodings(unknown_img, num_jitters=NUM_JITTERS_INDEX)

    print(f'检测到的人脸:{len(unknown_coding_list)}')

    matching_people = set()  # a set de-duplicates matches across faces

    for unknown_coding in unknown_coding_list:
        for name, know_people_coding_dict in know_people_coding_nest_dict.items():
            for coding in know_people_coding_dict.values():
                if face_recognition.compare_faces([coding], unknown_coding, tolerance=TOLERANCE_INDEX)[0]:
                    matching_people.add(name)
                    print(f'找到:{name}')
                    # First matching encoding is enough for this person.
                    break

    return list(matching_people)

def cameraCatchComparison(known_img_model_file_path):
    """Run live face recognition on the default webcam until 'q' is pressed.

    Recognition is relatively slow (roughly 3-5 s per recognised frame).

    :param known_img_model_file_path: path to the *.npy face-encoding database
    :return: None
    """
    # System camera index; 0 is the default device.
    CAMERA_IDX = 0
    cap = cv2.VideoCapture(CAMERA_IDX)

    # Idiom fix: `while cap.isOpened()` replaces the `== False: break` dance.
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break

        # 'video' mode shrinks the frame for speed; frame is drawn on in place.
        dealDrawFace(frame, known_img_model_file_path=known_img_model_file_path, deal_pattern='video')

        cv2.imshow('Face Recognition Video', frame)

        # Press 'q' to quit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Always release the camera and close all windows on exit.
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    #  # Build the known-face encodings database
    # trainFaceRecognitionLibrary('/Users/mac/Downloads/人脸识别/kenow_people_database')

    # # Draw the detected faces on an image
    npy_file = ''  # /Users/mac/Downloads/人脸识别/kenow_people_database/train_face_labels.npy
    img_path = '/Users/mac/Downloads/人脸识别/test/xf/fx2.jpg'
    faceDetectionDraw(img_path=img_path, known_img_model_file_path=npy_file, save_face_detection=True)

    # # Face recognition against a single image (*)
    # npy_file = '/Users/mac/Downloads/人脸识别/kenow_people_database/train_face_labels.npy'
    # unknown_img_path = '/Users/mac/Downloads/人脸识别/test/xf/fx2.jpg'
    # print(faceDetectionComparison(npy_file, unknown_img_path))

    # # Live webcam detection
    # npy_file = '/Users/mac/Downloads/人脸识别/kenow_people_database/train_face_labels.npy'
    # cameraCatchComparison(npy_file)

    pass


