import concurrent.futures
import face_recognition
import redis
import numpy as np
import base64
import cv2
import os
import json
from config.config import REDIS_HOST, REDIS_PORT, FACE_LIB_PREFIX, IN_FACE_SIMILARITY_THRESHOLD, \
    RECON_SIMILARITY_THRESHOLD, REDIS_PASSWORD, REDIS_TIMEOUT, REDIS_MAX_ACTIVE

# In-process cache of per-library face encodings, keyed by library uuid.
# Entries are invalidated (popped) by the enrollment/deletion/restore functions
# whenever a library's contents change.
ALL_ENCODINGS_MAP = {}

# Redis connection pool shared by the module-level client below.
pool = redis.ConnectionPool(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_PASSWORD,
    socket_timeout=REDIS_TIMEOUT / 1000,  # config value presumably in ms; redis-py expects seconds
    max_connections=REDIS_MAX_ACTIVE,
)
r = redis.Redis(connection_pool=pool)


def backup_face_data():
    """
    Back up every face library from Redis to a JSON file.

    Each library is written to backups/<lib_name>.json as a mapping of
    user id -> face encoding (list of float64 values).

    :return: list of paths of the backup files written
    """
    backup_dir = 'backups'
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs()
    os.makedirs(backup_dir, exist_ok=True)
    # "face_libs" stores lib_name -> lib_name, so hvals yields the library names
    back_filenames = []
    for key in r.hvals("face_libs"):
        key_str = key.decode('utf-8')
        prefixed_lib_uuid = f"{FACE_LIB_PREFIX}{key_str}"
        all_data = r.hgetall(prefixed_lib_uuid)
        # Convert raw encoding bytes to float lists so they are JSON-serializable
        decoded_data = {}
        for userid, encoding_bytes in all_data.items():
            userid_str = userid.decode('utf-8')
            decoded_data[userid_str] = np.frombuffer(encoding_bytes, dtype=np.float64).tolist()
        backup_filename = os.path.join(backup_dir, f"{key_str}.json")
        with open(backup_filename, 'w') as f:
            json.dump(decoded_data, f, indent=4)
        print(f"Face library data saved to {backup_filename}")
        back_filenames.append(backup_filename)
    return back_filenames


def restore_face_data(libName: str):
    """
    Restore a face library from its JSON backup file into Redis.

    The backup must live at backups/<libName>.json (as written by
    backup_face_data). Any existing data for the library is replaced.

    :param libName: name of the face library to restore
    :return: (0, success message) on success, (1, error message) on failure
    """
    try:
        filename = os.path.join('backups', f"{libName}.json")
        prefixed_lib_uuid = f"{FACE_LIB_PREFIX}{libName}"
        # Read the JSON backup
        with open(filename, 'r') as f:
            decoded_data = json.load(f)
        # Drop any existing encodings so the restore is a clean replacement
        r.delete(prefixed_lib_uuid)
        # Re-insert every user's encoding as raw float64 bytes
        for userid_str, encoding_list in decoded_data.items():
            encoding = np.array(encoding_list, dtype=np.float64)
            r.hset(prefixed_lib_uuid, userid_str, encoding.tobytes())
        # Re-register the library in the index
        r.hset("face_libs", libName, libName)
        # Invalidate the in-process encoding cache
        ALL_ENCODINGS_MAP.pop(libName, None)
        # Fixed: message previously printed the literal "(unknown)" instead of the path
        print(f"Face library data restored from {filename}")
        return 0, "人脸特征数据恢复成功"
    except Exception as e:
        print(f"Failed to restore face library data: {str(e)}")
        return 1, f"人脸特征数据恢复失败: {str(e)}"


# 定义初始化人脸底库函数
# Create (register) a new, empty face library
def init_face_lib(lib_name: str):
    """Register *lib_name* in the "face_libs" index, failing if it is taken.

    :return: (0, "成功", lib_name) on success, (1, error message, None) otherwise
    """
    try:
        # Reject duplicate library names
        if r.hexists("face_libs", lib_name):
            return 1, "人脸库名称已存在，初始化失败", None
        r.hset("face_libs", lib_name, lib_name)
    except Exception as e:
        return 1, f"初始化失败: {str(e)}", None
    return 0, "成功", lib_name


# 定义删除人脸底库函数
# Delete a face library and all of its stored encodings
def delete_face_lib(lib_uuid: str):
    """Remove *lib_uuid* from the index, then drop its hash and cache entry.

    :return: (0, "成功") on success, (1, error message) otherwise
    """
    try:
        # hdel returns 0 when the library was never registered
        if not r.hdel("face_libs", lib_uuid):
            return 1, "人脸底库不存在，删除失败"
        r.delete(f"{FACE_LIB_PREFIX}{lib_uuid}")
        ALL_ENCODINGS_MAP.pop(lib_uuid, None)
        return 0, "成功"
    except Exception as e:
        return 1, f"删除人脸底库失败: {str(e)}"


# 查询所有人脸底库
# List every registered face library
def query_face_lib():
    """Return all values of the "face_libs" hash (library names as bytes)."""
    return r.hvals("face_libs")


# 人脸入库
# Enroll a face into a library
def face_enrollment(lib_uuid: str, image: str, userid: str, similarity_threshold=IN_FACE_SIMILARITY_THRESHOLD):
    """
    Extract a face encoding from a base64 image and store it under *userid*.

    Enrollment is rejected when the userid already exists in the library, or
    when an existing face matches above *similarity_threshold* (duplicate person).

    :param lib_uuid: face-library uuid (stored with FACE_LIB_PREFIX in Redis)
    :param image: base64-encoded image
    :param userid: user id to register
    :param similarity_threshold: dedup threshold on score = 1 - face distance
    :return: (0, success message) or (1, error message)
    """
    try:
        prefixed_lib_uuid = f"{FACE_LIB_PREFIX}{lib_uuid}"
        # Reject duplicate user ids up front
        if r.hexists(prefixed_lib_uuid, userid):
            return 1, "用户 ID 已存在，请勿重复添加"
        img_np = base64_to_img(image)
        # Detection cascade: fast HOG first, then more upsampling, then the
        # slower but more robust CNN model (better for profiles / odd angles)
        face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=1)
        if not face_locations:
            face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=2)
        if not face_locations:
            face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=1, model='cnn')
        face_encodings = face_recognition.face_encodings(img_np, face_locations)

        if not face_encodings:
            return 1, "未检测到人脸"

        # Only the first detected face is enrolled
        face_encoding = face_encodings[0]

        # Deduplicate against encodings already in the library
        all_encodings = r.hgetall(prefixed_lib_uuid)
        if all_encodings:
            known_encodings = [np.frombuffer(enc, dtype=np.float64) for enc in all_encodings.values()]
            # Score = 1 - distance; any score above the threshold means this
            # person is already enrolled
            scores = 1 - face_recognition.face_distance(known_encodings, face_encoding)
            if any(score > similarity_threshold for score in scores):
                return 1, "相同人像已入库，请勿重复添加"

        # Store hash field: userid -> raw float64 bytes of the encoding
        # (prefixed_lib_uuid was already computed above; the original
        # recomputed it redundantly here)
        r.hset(prefixed_lib_uuid, userid, face_encoding.tobytes())
        # Invalidate the in-process cache for this library
        ALL_ENCODINGS_MAP.pop(lib_uuid, None)
        return 0, "人脸入库成功"
    except Exception as e:
        return 1, f"人脸入库失败: {str(e)}"


# 定义删除人脸函数
# Remove a single user's face from a library
def delete_face(lib_uuid: str, userid: str):
    """Delete *userid*'s encoding from library *lib_uuid*.

    :return: (0, "成功", True) on success, (1, message, False) otherwise
    """
    try:
        prefixed_key = f"{FACE_LIB_PREFIX}{lib_uuid}"
        # hdel returns the number of fields removed (0 means unknown user)
        if r.hdel(prefixed_key, userid):
            # Drop the now-stale cache entry for this library
            ALL_ENCODINGS_MAP.pop(lib_uuid, None)
            return 0, "成功", True
        return 1, "用户不存在，删除失败", False
    except Exception as e:
        return 1, f"删除人脸失败: {str(e)}", False


# 定义获取人脸名称列表函数
# List all user ids enrolled in a library
def get_face_name_list(lib_uuid: str):
    """Return every user id stored in library *lib_uuid*.

    :return: (0, "成功", [userid, ...]) on success, (1, message, []) otherwise
    """
    try:
        prefixed_key = f"{FACE_LIB_PREFIX}{lib_uuid}"
        raw_ids = r.hkeys(prefixed_key)
        # Redis returns bytes; decode each field name to str
        names = []
        for raw in raw_ids:
            names.append(raw.decode('utf-8'))
        return 0, "成功", names
    except Exception as e:
        return 1, f"获取人脸名称列表失败: {str(e)}", []


# 定义 1:N 人脸识别函数
# 1:N face recognition against a library
def face_recognition_1_n(lib_uuid: str, image: str, similarity_threshold: float = RECON_SIMILARITY_THRESHOLD):
    """
    Recognize every face found in a base64 image against library *lib_uuid*.

    :param lib_uuid: face-library uuid
    :param image: base64-encoded image
    :param similarity_threshold: minimum score (1 - face distance) to report a match
    :return: (0, results, "成功") on success — results follow detection order —
             or (1, [], error message) on failure
    """
    try:
        img_np = base64_to_img(image)
        # Detection cascade: fast HOG first, then more upsampling, then the
        # slower but more robust CNN model (better for profiles / odd angles)
        face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=1)
        if not face_locations:
            face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=2)
        if not face_locations:
            face_locations = face_recognition.face_locations(img_np, number_of_times_to_upsample=1, model='cnn')
        face_encodings = face_recognition.face_encodings(img_np, face_locations)

        if not face_encodings:
            return 1, [], "未检测到人脸"
        all_encodings = get_face_encodings(lib_uuid)
        if not all_encodings:
            return 1, [], "人脸库为空"

        # Known encodings and user ids share index positions
        known_encodings = [np.frombuffer(enc, dtype=np.float64) for enc in all_encodings.values()]
        user_ids = [user_id.decode('utf-8') for user_id in all_encodings.keys()]

        if len(face_encodings) == 1:
            # Skip the thread-pool overhead in the common single-face case
            match = process_face(face_encodings[0], known_encodings, user_ids, similarity_threshold)
            results = [match] if match else []
        else:
            # executor.map preserves detection order; the previous as_completed
            # loop returned matches in nondeterministic completion order
            with concurrent.futures.ThreadPoolExecutor(max_workers=len(face_encodings)) as executor:
                matches = executor.map(
                    lambda enc: process_face(enc, known_encodings, user_ids, similarity_threshold),
                    face_encodings,
                )
                results = [m for m in matches if m]

        return 0, results, "成功"
    except Exception as e:
        return 1, [], f"人脸识别失败: {str(e)}"


def process_face(face_encoding, known_encodings, user_ids, similarity_threshold):
    """Match one encoding against the library; return the best hit or None.

    :return: {"recUserID": ..., "personScore": "0.xx"} when the best score
             exceeds *similarity_threshold*, otherwise None
    """
    distances = face_recognition.face_distance(known_encodings, face_encoding)
    best_idx = int(np.argmin(distances))
    score = 1 - distances[best_idx]
    # Guard clause: no confident match
    if score <= similarity_threshold:
        return None
    return {"recUserID": user_ids[best_idx], "personScore": f"{score:.2f}"}


def get_face_encodings(lib_uuid: str):
    """
    Return the raw {userid bytes: encoding bytes} mapping for a library,
    with in-process caching. Cache entries are invalidated by the
    enrollment/deletion/restore functions.

    :param lib_uuid: face-library uuid (without prefix)
    :return: dict of hash fields as returned by Redis HGETALL
    """
    cached = ALL_ENCODINGS_MAP.get(lib_uuid)
    if cached is not None:
        return cached
    prefixed_lib_uuid = f"{FACE_LIB_PREFIX}{lib_uuid}"
    # A pipeline around a single command only adds overhead; call hgetall
    # directly (the original wrapped this one command in a pipeline).
    all_encodings = r.hgetall(prefixed_lib_uuid)
    ALL_ENCODINGS_MAP[lib_uuid] = all_encodings
    return all_encodings


# 定义人脸比较
# 1:1 face comparison between two images
def face_compare(image_A: str, image_B: str):
    """Compare the first face found in each base64 image.

    :return: (status, result, score_str, message) — result is 0 when the
             similarity score reaches RECON_SIMILARITY_THRESHOLD, else 1
    """
    try:
        img_np_A = base64_to_img(image_A)
        img_np_B = base64_to_img(image_B)
        # Guard: undecodable images
        if img_np_A is None or img_np_B is None:
            return 1, None, None, "图片数据不是有效的 base64 编码"

        encodings_A = face_recognition.face_encodings(img_np_A)
        encodings_B = face_recognition.face_encodings(img_np_B)
        if not encodings_A or not encodings_B:
            return 1, None, None, "未检测到人脸"

        # Compare the first face from each image
        distance = face_recognition.face_distance([encodings_A[0]], encodings_B[0])[0]
        score = 1 - distance
        result = 0 if score >= RECON_SIMILARITY_THRESHOLD else 1  # threshold tunable as needed
        return 0, result, f"{score:.2f}", "成功"
    except Exception as e:
        return 1, None, None, f"人脸比对失败: {str(e)}"


def base64_to_img(base64_str):
    """
    Decode a base64 image string into an RGB numpy array, downscaling large images.

    :param base64_str: base64-encoded image bytes (any format cv2 can decode)
    :return: RGB image array, or None if the payload is not a decodable image
    """
    img_data = base64.b64decode(base64_str)
    nparr = np.frombuffer(img_data, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Fix: imdecode returns None for invalid/corrupt image data; previously
    # execution fell through and crashed in resize/cvtColor on None. Return
    # None so callers (e.g. face_compare) can report a clean error.
    if image is None:
        return None
    img_size = len(img_data)
    # Downscale by payload size to bound face-detection cost on large uploads
    if img_size > 5 * 1024 * 1024:  # > 5MB
        scale_factor = 0.1
    elif img_size > 2 * 1024 * 1024:  # > 2MB
        scale_factor = 0.25
    elif img_size > 1 * 1024 * 1024:  # > 1MB
        scale_factor = 0.5
    else:
        scale_factor = 1.0
    if scale_factor != 1.0:
        image = cv2.resize(image, (0, 0), fx=scale_factor, fy=scale_factor)
    # cv2 decodes as BGR; face_recognition works on RGB
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return rgb_image
