import cupy as cp
import json
import os
import logging
import cv2
import my_camera
import logging

# Configure the root logger once at import time so all module logs share one format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

DATA_FILE = "data.json"   # JSON store: average face, PCA basis, per-person feature vectors
FACES_FILE = "faces.npy"  # raw flattened grayscale faces, one row per person (cupy .npy format)


def initialize_data():
    """Initialize the face database.

    Behavior depends on which files already exist:
      - Neither data.json nor faces.npy: load the two seed face images,
        preprocess them, save the raw faces to faces.npy, run PCA and
        write the results to data.json.
      - faces.npy exists but data.json does not: recompute PCA from the
        saved faces and write data.json.
      - data.json exists: nothing to do here.

    Raises:
        FileNotFoundError: if a seed face image cannot be read from disk.
    """
    if not os.path.exists(DATA_FILE) and not os.path.exists(FACES_FILE):
        # Neither file exists: build the database from the two seed images.
        logging.info("Neither data.json nor faces.npy found. Loading seed faces...")

        face_rows = []
        for image_path in ("datasheet/face_1.jpg", "datasheet/face_2.jpg"):
            logging.info(f"loading {image_path}...")
            img = cv2.imread(image_path, cv2.IMREAD_COLOR)
            if img is None:
                # cv2.imread returns None instead of raising on a missing/bad
                # file; fail loudly here rather than crash inside cvtColor.
                raise FileNotFoundError(f"Could not read seed face image: {image_path}")
            face_rows.append(preprocess_face(img).flatten())
        logging.info("faces loaded.")

        # Persist the raw (flattened) faces so PCA can be redone later.
        faces = cp.array(face_rows)
        cp.save(FACES_FILE, faces)
        logging.info(f"Saved faces to {FACES_FILE}.")

        average_face, feature_matrix, feature_vectors = compute_pca(faces)
        logging.info("PCA computed.")

        _write_pca_data(average_face, feature_matrix, ["陈策", "安雨辰"], feature_vectors)
        logging.info(f"Database initialized and saved to {DATA_FILE}.")

    elif os.path.exists(FACES_FILE) and not os.path.exists(DATA_FILE):
        # faces.npy exists but data.json is missing: recompute PCA only.
        logging.info(f"{FACES_FILE} found. Computing PCA...")

        faces = cp.load(FACES_FILE)
        average_face, feature_matrix, feature_vectors = compute_pca(faces)

        _write_pca_data(average_face, feature_matrix,
                        ["random_face_1", "random_face_2"], feature_vectors)
        logging.info(f"PCA data saved to {DATA_FILE}.")

    else:
        logging.info(f"{DATA_FILE} exists, loading data...")


def _write_pca_data(average_face, feature_matrix, names, feature_vectors):
    """Serialize PCA results to DATA_FILE as JSON, one entry per named face."""
    data = {
        "average_face": average_face.tolist(),
        "feature_matrix": feature_matrix.tolist(),
        "feature_vectors": [
            {"name": person_name, "vector": feature_vectors[i].tolist()}
            for i, person_name in enumerate(names)
        ],
    }
    logging.info(f"Saving PCA data to {DATA_FILE}...")
    with open(DATA_FILE, "w") as f:
        json.dump(data, f, indent=4)

def compute_pca(faces, num_components=100):
    """Run PCA on a stack of flattened face vectors.

    Args:
        faces: array of shape (num_samples, num_features), one face per row.
        num_components: maximum number of principal components to keep.

    Returns:
        Tuple (mean_face, basis, projections):
          - mean_face: shape (num_features,), the average face.
          - basis: shape (num_features, k), top-k eigenvectors as columns.
          - projections: shape (num_samples, k), each centered face
            projected onto the retained basis.

    Raises:
        ValueError: if fewer than two faces are supplied.
    """
    n_samples, n_features = faces.shape
    if n_samples < 2:
        raise ValueError("At least two faces are required for PCA.")

    # Center the data around the mean face.
    mean_face = cp.mean(faces, axis=0)
    centered = faces - mean_face

    # Unbiased sample covariance of the features (hence n_samples - 1).
    covariance = cp.matmul(centered.T, centered) / (n_samples - 1)

    # eigh is the right solver here: the covariance matrix is symmetric.
    eigvals, eigvecs = cp.linalg.eigh(covariance)

    # eigh yields eigenvalues in ascending order — take the largest ones.
    descending = cp.argsort(eigvals)[::-1]
    basis = eigvecs[:, descending[:num_components]]

    # Project every centered face onto the retained principal directions.
    projections = cp.dot(centered, basis)

    return mean_face, basis, projections

def preprocess_face(face, use_gamma=True):
    """Preprocess a BGR face image for recognition.

    Converts to grayscale, resizes to 100x100 (the training resolution),
    normalizes illumination via adaptive gamma correction (default) or
    histogram equalization, and flattens the result to a 1-D vector.
    """
    # Grayscale at a fixed size so every face has the same feature layout.
    gray = cv2.resize(cv2.cvtColor(face, cv2.COLOR_BGR2GRAY), (100, 100))

    # Illumination normalization before flattening.
    if use_gamma:
        normalized = my_camera.adaptive_gamma_correction(gray)
    else:
        normalized = cv2.equalizeHist(gray)

    return normalized.flatten()

def load_data():
    """Load PCA data from DATA_FILE.

    Returns:
        dict with keys:
            "average_face": cp.ndarray or None,
            "feature_matrix": cp.ndarray or None,
            "feature_vectors": list of {"name", "vector"} dicts.
        A missing or corrupt file degrades to the empty structure
        instead of raising.
    """
    empty = {"average_face": None, "feature_matrix": None, "feature_vectors": []}

    try:
        with open(DATA_FILE, "r") as f:
            data = json.load(f)
    except FileNotFoundError:
        logging.warning(f"Database not found at {DATA_FILE}, initializing empty data.")
        return empty
    except json.JSONDecodeError:
        # A truncated or hand-mangled file should degrade gracefully,
        # not crash the whole recognition pipeline at startup.
        logging.warning(f"Database at {DATA_FILE} is not valid JSON, initializing empty data.")
        return empty

    # .get guards against missing keys in older or hand-edited files.
    raw_average = data.get("average_face")
    raw_matrix = data.get("feature_matrix")
    average_face = cp.array(raw_average) if raw_average else None
    feature_matrix = cp.array(raw_matrix) if raw_matrix else None
    feature_vectors = data.get("feature_vectors", [])

    logging.info(f"Data loaded: average_face {average_face.shape if average_face is not None else None}, "
                 f"feature_matrix {feature_matrix.shape if feature_matrix is not None else None}, "
                 f"feature_vectors {len(feature_vectors)} items")

    return {
        "average_face": average_face,
        "feature_matrix": feature_matrix,
        "feature_vectors": feature_vectors
    }

def save_data(average_face, feature_matrix, feature_vectors):
    """Persist PCA data to the JSON database file.

    Each argument may arrive either as a plain list (already serialized)
    or as an array exposing .tolist(); both forms are normalized here.
    """
    def _as_list(value):
        # Accept ready-made lists or any array-like with .tolist().
        return value if isinstance(value, list) else value.tolist()

    payload = {
        "average_face": _as_list(average_face),
        "feature_matrix": _as_list(feature_matrix),
        "feature_vectors": [
            {"name": entry["name"], "vector": _as_list(entry["vector"])}
            for entry in feature_vectors
        ],
    }

    with open(DATA_FILE, "w") as f:
        json.dump(payload, f, indent=4)

    logging.info("Database updated: data.json")

def face_recognition(face, num=25):
    """Identify a face against the stored database.

    Args:
        face: BGR face image to identify.
        num: number of leading PCA dimensions used for matching.

    Returns:
        List of up to three (name, confidence) tuples, best match first,
        where confidence = 1 - distance / max_distance (the worst
        candidate scores 0). Returns None when the database is empty or
        the stored basis does not match the input dimensionality.
    """
    data = load_data()

    if not data["feature_vectors"]:
        logging.warning("No faces in database.")
        return None

    # Move everything onto the GPU as cupy arrays.
    new_face_vector = cp.array(preprocess_face(face))  # (10000,)
    average_face = cp.array(data["average_face"])  # (10000,)
    feature_matrix = cp.array(data["feature_matrix"])  # (10000, 100)

    existing_vectors = cp.array([vec["vector"] for vec in data["feature_vectors"]])  # (N, 100)
    names = [vec["name"] for vec in data["feature_vectors"]]

    # The stored basis must match the preprocessed face dimensionality.
    if feature_matrix.shape[0] != new_face_vector.shape[0]:
        logging.error(f"Shape mismatch: new_face_vector {new_face_vector.shape} vs feature_matrix {feature_matrix.shape}")
        return None

    # Project the new face into PCA space; keep only the first `num` dims.
    new_pca_vector = cp.dot(feature_matrix.T, (new_face_vector - average_face))[:num]

    # Batched Euclidean distance against every stored vector (first `num` dims).
    distances = cp.linalg.norm(existing_vectors[:, :num] - new_pca_vector, axis=1)

    # Indices of the three smallest distances (best matches first).
    top3_indices = cp.argsort(distances)[:3]

    # Guard the normalization: if every distance is zero (probe identical
    # to all stored faces) the old 1 - d/max formula would divide by zero
    # and yield NaN confidences; report perfect matches instead.
    max_distance = cp.max(distances)
    if max_distance == 0:
        return [(names[int(idx)], 1.0) for idx in top3_indices]

    results = []
    for idx in top3_indices:
        confidence = 1 - (distances[idx] / max_distance)
        # idx is a cupy integer scalar; cast to Python int to index the list.
        results.append((names[int(idx)], confidence))

    return results

def add_face(name, face, num_components=100):
    """Add a new face to the database and rebuild the PCA model.

    Appends the preprocessed face to FACES_FILE, recomputes PCA over the
    full face set, and rewrites data.json. Because adding a face changes
    the PCA basis, EVERY stored feature vector is re-projected onto the
    new basis — keeping the old vectors (as the previous implementation
    did) left them inconsistent with the new feature_matrix used at
    recognition time.

    Args:
        name: display name to associate with the face.
        face: BGR face image.
        num_components: number of principal components to retain.
    """
    # Existing database (needed only for the stored names, in row order).
    data = load_data()

    # Preprocess the input face into a flattened 10000-dim grayscale vector.
    new_face_vector = preprocess_face(face)
    logging.info(f"new_face_vector shape (before PCA): {new_face_vector.shape}")

    # Load the raw face rows and append the new one.
    faces = cp.load(FACES_FILE)
    logging.info(f"Loaded faces shape: {faces.shape}")

    faces = cp.append(faces, [new_face_vector.flatten()], axis=0)
    logging.info(f"Updated faces shape: {faces.shape}")

    cp.save(FACES_FILE, faces)
    logging.info(f"Saved updated faces to {FACES_FILE}.")

    # Recompute PCA over the complete face set (new basis).
    average_face, feature_matrix, feature_vectors = compute_pca(faces, num_components=num_components)
    logging.info("PCA computed.")

    # Rebuild the name/vector list in row order: existing names first
    # (faces.npy rows follow data.json order), then the new face. All
    # vectors come from the fresh PCA so they share one basis.
    all_names = [entry["name"] for entry in data["feature_vectors"]] + [name]
    if len(all_names) != len(feature_vectors):
        logging.warning(
            f"Name count ({len(all_names)}) does not match PCA rows "
            f"({len(feature_vectors)}); database may be out of sync."
        )
    feature_vectors_list = [
        {"name": person_name, "vector": feature_vectors[i].tolist()}
        for i, person_name in enumerate(all_names[:len(feature_vectors)])
    ]

    # Persist the refreshed model.
    save_data(average_face.tolist(), feature_matrix.tolist(), feature_vectors_list)
    logging.info(f"Added new face: {name}")


