from libs.PipeLine import PipeLine
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
from libs.Utils import *
import os, sys, ujson, gc, math
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import aidemo
import time

# 自定义人脸检测任务类
class FaceDetApp(AIBase):
    """Face detection task: runs the detection kmodel on a full frame and
    decodes face boxes plus 5-point landmarks."""

    def __init__(self, kmodel_path, model_input_size, anchors, confidence_threshold=0.25, nms_threshold=0.3, rgb888p_size=[1920, 1080], display_size=[1920, 1080], debug_mode=0):
        """Initialize the face-detection app.

        Args:
            kmodel_path: path of the detection kmodel on the device.
            model_input_size: [w, h] input resolution of the detection model.
            anchors: prior/anchor array consumed by the aidemo decoder.
            confidence_threshold: minimum confidence kept after decoding.
            nms_threshold: IoU threshold for non-maximum suppression.
            rgb888p_size: [w, h] resolution the sensor delivers to the AI path.
            display_size: [w, h] of the video output (VO) layer.
            debug_mode: >0 enables ScopedTiming instrumentation.
        """
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.anchors = anchors
        # Width must be 16-byte aligned on the AI input path.
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        # Width must be 16-byte aligned on the VO output path.
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        # Ai2d instance implements the model preprocessing pipeline.
        self.ai2d = Ai2d(debug_mode)
        # Input and output tensors are NCHW uint8.
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        """Configure letterbox (pad + resize) preprocessing.

        Args:
            input_image_size: optional [w, h] override of the source image
                size; defaults to the sensor resolution (rgb888p_size).
        """
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            # Bug fix: padding must be derived from the actual preprocessing
            # input size (ai2d_input_size), not unconditionally from the
            # sensor size — otherwise a caller-supplied input_image_size was
            # used by build() below but ignored by the letterbox computation.
            top, bottom, left, right, _ = letterbox_pad_param(ai2d_input_size, self.model_input_size)
            # Pad borders with the fixed fill values 104/117/123.
            self.ai2d.pad([0, 0, 0, 0, top, bottom, left, right], 0, [104, 117, 123])
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            # Build the pipeline: input tensor shape -> model input shape (NCHW).
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        """Decode the raw model outputs via aidemo.face_det_post_process.

        Returns:
            (boxes, landmarks) — both the empty result when nothing is found.
        NOTE(review): coordinates are mapped back using rgb888p_size, which
        assumes the default preprocess input size — confirm if
        config_preprocess is ever called with a custom input_image_size.
        """
        with ScopedTiming("postprocess", self.debug_mode > 0):
            res = aidemo.face_det_post_process(self.confidence_threshold, self.nms_threshold, self.model_input_size[0], self.anchors, self.rgb888p_size, results)
            if len(res) == 0:
                return res, res
            else:
                return res[0], res[1]

# 自定义人脸注册任务类
class FaceRegistrationApp(AIBase):
    """Face feature-extraction task: aligns a detected face to a 112x112
    template via an affine warp, then runs the recognition kmodel."""

    def __init__(self, kmodel_path, model_input_size, rgb888p_size=[1920, 1080], display_size=[1920, 1080], debug_mode=0):
        """Initialize the face feature-extraction app.

        Args:
            kmodel_path: path of the recognition kmodel on the device.
            model_input_size: [w, h] input resolution of the model.
            rgb888p_size: [w, h] resolution the sensor delivers to the AI path.
            display_size: [w, h] of the video output (VO) layer.
            debug_mode: >0 enables ScopedTiming instrumentation.
        """
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.model_input_size = model_input_size
        # Width must be 16-byte aligned on the AI input path.
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        # Width must be 16-byte aligned on the VO output path.
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        # Canonical 5-point landmark template (x, y pairs) used as the
        # alignment target for a 112x112 face crop.
        self.umeyama_args_112 = [
            38.2946, 51.6963,
            73.5318, 51.5014,
            56.0252, 71.7366,
            41.5493, 92.3655,
            70.7299, 92.2041
        ]
        self.ai2d = Ai2d(debug_mode)
        # Input and output tensors are NCHW uint8.
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, landm, input_image_size=None):
        """Configure affine-warp preprocessing that aligns the face given
        by the 5 landmarks `landm` to the 112x112 template.

        Args:
            landm: flat [x0, y0, ..., x4, y4] landmark list for one face.
            input_image_size: optional [w, h] override of the source size.
        """
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            # Compute the similarity transform and configure the affine warp.
            affine_matrix = self.get_affine_matrix(landm)
            self.ai2d.affine(nn.interp_method.cv2_bilinear, 0, 0, 127, 1, affine_matrix)
            # Build the pipeline: input tensor shape -> model input shape (NCHW).
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]], [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        """Return the face feature vector (first row of the first output).

        Bug fix: removed unreachable statements that followed this return
        and referenced an undefined name (`feature`).
        """
        with ScopedTiming("postprocess", self.debug_mode > 0):
            return results[0][0]

    def svd22(self, a):
        """Closed-form SVD of the 2x2 matrix a = [a0 a1; a2 a3].

        Args:
            a: flat list [a0, a1, a2, a3] in row-major order.
        Returns:
            (u, s, v): flat 2x2 factors and the two singular values.
        """
        s = [0.0, 0.0]
        u = [0.0, 0.0, 0.0, 0.0]
        v = [0.0, 0.0, 0.0, 0.0]
        # Singular values from sums/differences of the matrix entries.
        s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2
        s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2))
        # Right-singular-vector rotation; degenerate (equal singular values)
        # case falls back to the identity rotation.
        v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \
        s[0] > s[1] else 0
        v[0] = math.sqrt(1 - v[2] ** 2)
        v[1] = -v[2]
        v[3] = v[0]
        # Left singular vectors; guard each division against a zero
        # singular value.
        u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1
        u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0
        u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2]
        u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0]
        v[0] = -v[0]
        v[2] = -v[2]
        return u, s, v

    def image_umeyama_112(self, src):
        """Compute the Umeyama similarity transform mapping the 5 source
        landmarks `src` (flat [x0, y0, ...]) onto the 112x112 template.

        Returns:
            3x3 nested-list transform matrix T (last row [0, 0, 1]).
        """
        SRC_NUM = 5
        SRC_DIM = 2
        # Means of source and destination point sets.
        src_mean = [0.0, 0.0]
        dst_mean = [0.0, 0.0]
        for i in range(0, SRC_NUM * 2, 2):
            src_mean[0] += src[i]
            src_mean[1] += src[i + 1]
            dst_mean[0] += self.umeyama_args_112[i]
            dst_mean[1] += self.umeyama_args_112[i + 1]
        src_mean[0] /= SRC_NUM
        src_mean[1] /= SRC_NUM
        dst_mean[0] /= SRC_NUM
        dst_mean[1] /= SRC_NUM
        # De-meaned point sets.
        src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)]
        dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)]
        for i in range(SRC_NUM):
            src_demean[i][0] = src[2 * i] - src_mean[0]
            src_demean[i][1] = src[2 * i + 1] - src_mean[1]
            dst_demean[i][0] = self.umeyama_args_112[2 * i] - dst_mean[0]
            dst_demean[i][1] = self.umeyama_args_112[2 * i + 1] - dst_mean[1]
        # Cross-covariance A = dst_demean^T * src_demean / N.
        A = [[0.0, 0.0], [0.0, 0.0]]
        for i in range(SRC_DIM):
            for k in range(SRC_DIM):
                for j in range(SRC_NUM):
                    A[i][k] += dst_demean[j][i] * src_demean[j][k]
                A[i][k] /= SRC_NUM
        T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        # Rotation part R = U * V^T from the SVD of the covariance.
        U, S, V = self.svd22([A[0][0], A[0][1], A[1][0], A[1][1]])
        T[0][0] = U[0] * V[0] + U[1] * V[2]
        T[0][1] = U[0] * V[1] + U[1] * V[3]
        T[1][0] = U[2] * V[0] + U[3] * V[2]
        T[1][1] = U[2] * V[1] + U[3] * V[3]
        scale = 1.0
        # Variance of the de-meaned source points determines the scale.
        src_demean_mean = [0.0, 0.0]
        src_demean_var = [0.0, 0.0]
        for i in range(SRC_NUM):
            src_demean_mean[0] += src_demean[i][0]
            src_demean_mean[1] += src_demean[i][1]
        src_demean_mean[0] /= SRC_NUM
        src_demean_mean[1] /= SRC_NUM
        for i in range(SRC_NUM):
            src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0])
            src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1])
        src_demean_var[0] /= SRC_NUM
        src_demean_var[1] /= SRC_NUM
        scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1])
        # Translation maps the scaled/rotated source mean onto the dst mean.
        T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1])
        T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1])
        T[0][0] *= scale
        T[0][1] *= scale
        T[1][0] *= scale
        T[1][1] *= scale
        return T

    def get_affine_matrix(self, sparse_points):
        """Return the 2x3 affine matrix (flat, row-major) aligning the
        5 landmarks `sparse_points` to the 112x112 template."""
        with ScopedTiming("get_affine_matrix", self.debug_mode > 1):
            # Umeyama gives a 3x3 matrix; Ai2d.affine takes the top 2 rows.
            matrix_dst = self.image_umeyama_112(sparse_points)
            matrix_dst = [matrix_dst[0][0], matrix_dst[0][1], matrix_dst[0][2],
                          matrix_dst[1][0], matrix_dst[1][1], matrix_dst[1][2]]
            return matrix_dst

# 人脸识别任务类
class FaceRecognition:
    def __init__(self, pl, debug_mode=0):
        """Wire up the full face-recognition pipeline.

        Args:
            pl: PipeLine object supplying frames, display size and the OSD image.
            debug_mode: >0 enables ScopedTiming instrumentation.
        """
        # Display mode actually used here is "lcd" (the original comment
        # claimed hdmi was the default, contradicting this assignment).
        # NOTE(review): display_mode is never read in this method — confirm
        # whether it is dead or consumed by code outside this view.
        display_mode = "lcd"
        # For K230 keep as-is; on K230D this can be reduced to [640, 360].
        rgb888p_size = [1280, 720]
        # Face detection model path
        face_det_kmodel_path = "/sdcard/examples/kmodel/face_detection_320.kmodel"
        # Face recognition (feature extraction) model path
        face_reg_kmodel_path = "/sdcard/examples/kmodel/face_recognition.kmodel"
        # Anchor priors consumed by the detector's post-process decoder
        anchors_path = "/sdcard/examples/utils/prior_data_320.bin"
        # Directory holding registered face feature .bin files
        database_dir = "/sdcard/examples/utils/db/"
        face_det_input_size = [320, 320]
        face_reg_input_size = [112, 112]
        confidence_threshold = 0.5
        nms_threshold = 0.2
        # 4200 anchors x 4 values each, read from the prior file
        anchor_len = 4200
        det_dim = 4
        anchors = np.fromfile(anchors_path, dtype=np.float)
        anchors = anchors.reshape((anchor_len, det_dim))
        face_recognition_threshold = 0.75        # minimum similarity to accept a match

        display_size = pl.get_display_size()

        # Face detection model path
        self.face_det_kmodel = face_det_kmodel_path
        # Face recognition model path
        self.face_reg_kmodel = face_reg_kmodel_path
        # Detection model input resolution
        self.det_input_size = face_det_input_size
        # Recognition model input resolution
        self.reg_input_size = face_reg_input_size
        self.database_dir = database_dir
        # Anchor priors
        self.anchors = anchors
        # Confidence threshold
        self.confidence_threshold = confidence_threshold
        # NMS threshold
        self.nms_threshold = nms_threshold
        self.face_recognition_threshold = face_recognition_threshold
        # Sensor-to-AI image resolution, width 16-byte aligned
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        # VO output resolution, width 16-byte aligned
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        # Debug mode
        self.debug_mode = debug_mode
        self.max_register_face = 100                  # maximum number of faces in the database
        self.feature_num = 128                        # face feature dimensionality
        self.valid_register_face = 0                  # number of faces already registered
        self.db_name = []
        self.db_data = []
        self.face_det = FaceDetApp(self.face_det_kmodel, model_input_size=self.det_input_size, anchors=self.anchors, confidence_threshold=self.confidence_threshold, nms_threshold=self.nms_threshold, rgb888p_size=self.rgb888p_size, display_size=self.display_size, debug_mode=0)
        self.face_reg = FaceRegistrationApp(self.face_reg_kmodel, model_input_size=self.reg_input_size, rgb888p_size=self.rgb888p_size, display_size=self.display_size)
        self.face_det.config_preprocess()
        # Load the registered-face database from disk
        self.database_init()
        self.pl = pl

    # run函数
    def run(self, input_np):
        """Detect every face in the frame, then identify each against the
        registered database.

        Args:
            input_np: the frame delivered by the pipeline in rgb888p layout.
        Returns:
            (detection boxes, list of per-face recognition result strings).
        """
        boxes, keypoints = self.face_det.run(input_np)
        identities = []
        for points in keypoints:
            # Re-align preprocessing to this face's 5 landmarks, extract
            # its feature vector, then look it up in the database.
            self.face_reg.config_preprocess(points)
            feature = self.face_reg.run(input_np)
            print(f"原始特征维度: {len(feature)}")
            identities.append(self.database_search(feature))
        return boxes, identities

    def database_init(self):
        """Load all .bin feature files from the database directory.

        Every stored feature is coerced to self.feature_num dimensions:
        longer vectors are truncated, shorter ones are skipped. Creates the
        database directory when it does not exist.
        """
        with ScopedTiming("database_init", self.debug_mode > 1):
            # Reset any previously loaded entries.
            self.db_name = []
            self.db_data = []
            self.valid_register_face = 0

            try:
                db_file_list = os.listdir(self.database_dir)
            except OSError:
                # Bug fix: create the directory BEFORE announcing it was
                # created; the original printed the message first, so a
                # failing makedirs left a misleading log line behind.
                # NOTE(review): os.makedirs may be unavailable on some
                # MicroPython ports (only os.mkdir) — confirm on target.
                os.makedirs(self.database_dir)
                print(f"数据库目录 {self.database_dir} 不存在，已创建")
                return

            dim = self.feature_num  # canonical feature length (was hard-coded 128)
            for db_file in db_file_list:
                if not db_file.endswith('.bin'):
                    continue
                if self.valid_register_face >= self.max_register_face:
                    break

                full_db_file = self.database_dir + db_file
                try:
                    with open(full_db_file, 'rb') as f:
                        data = f.read()
                    feature = np.frombuffer(data, dtype=np.float)

                    # Coerce the stored feature to exactly `dim` values.
                    if len(feature) > dim:
                        print(f"警告: {db_file} 是{len(feature)}维特征，已截取为{dim}维")
                        feature = feature[:dim]
                    elif len(feature) < dim:
                        print(f"警告: {db_file} 特征维度不足{dim}维({len(feature)})，已跳过")
                        continue

                    self.db_data.append(feature)
                    # rsplit keeps dotted person names ("a.b.bin" -> "a.b");
                    # split('.')[0] truncated them to "a".
                    name = db_file.rsplit('.', 1)[0]
                    self.db_name.append(name)
                    self.valid_register_face += 1
                    print(f"已加载 {name} 特征，维度: {len(feature)}")

                except Exception as e:
                    print(f"加载 {db_file} 失败: {e}")

            print(f"数据库初始化完成，有效特征数: {self.valid_register_face}")
    def database_reset(self):
        """Drop every registered face from the in-memory database."""
        with ScopedTiming("database_reset", self.debug_mode > 1):
            print("database clearing...")
            # Forget features, names and the registered-face counter.
            self.db_data = []
            self.db_name = []
            self.valid_register_face = 0
            print("database clear Done!")

    def database_search(self, feature):
        """Find the best database match for a live feature vector.

        Args:
            feature: 1-D feature array from FaceRegistrationApp.
        Returns:
            'unknown' when no entry reaches the recognition threshold,
            otherwise a 'name: ..., score:...' string.
        """
        with ScopedTiming("database_search", self.debug_mode > 1):
            # Consistency fix: use the dimensionality declared in __init__
            # (self.feature_num) instead of a hard-coded 128.
            dim = self.feature_num
            v_id = -1
            v_score_max = 0.0

            # Coerce the live feature to the canonical dimension.
            if len(feature) > dim:
                print(f"实时特征是{len(feature)}维，已截取为{dim}维")
                feature = feature[:dim]
            elif len(feature) < dim:
                print(f"实时特征维度不足{dim}维({len(feature)})，返回未知")
                return 'unknown'

            # L2-normalize WITHOUT mutating the caller's array (the original
            # `feature /= feature_norm` modified it in place).
            try:
                feature_norm = np.linalg.norm(feature)
                if feature_norm == 0:
                    return 'unknown'
                feature = feature / feature_norm
            except Exception as e:
                print(f"特征归一化失败: {e}")
                return 'unknown'

            # Scan every registered entry for the highest similarity.
            for i in range(self.valid_register_face):
                db_feature = self.db_data[i]

                # Skip entries whose stored dimension is wrong.
                if len(db_feature) != dim:
                    print(f"数据库特征{i}维度错误({len(db_feature)})，已跳过")
                    continue

                # Normalize the stored feature (out of place).
                try:
                    db_norm = np.linalg.norm(db_feature)
                    if db_norm == 0:
                        continue
                    db_feature_norm = db_feature / db_norm
                except Exception as e:
                    print(f"数据库特征{i}归一化失败: {e}")
                    continue

                # Cosine similarity mapped from [-1, 1] into [0, 1].
                try:
                    v_score = np.dot(feature, db_feature_norm) / 2 + 0.5
                    if v_score > v_score_max:
                        v_score_max = v_score
                        v_id = i
                except Exception as e:
                    print(f"相似度计算失败: {e}")
                    print(f"特征维度: {len(feature)}, 数据库特征维度: {len(db_feature_norm)}")

            # Accept the best match only above the recognition threshold.
            if v_id == -1:
                return 'unknown'
            elif v_score_max < self.face_recognition_threshold:
                return 'unknown'
            else:
                return f'name: {self.db_name[v_id]}, score:{v_score_max:.2f}'
    # 绘制识别结果
    def draw_result(self, dets, recg_results):
        """Render each detected face box and its recognition result on the
        pipeline's OSD layer.

        Args:
            dets: list of detections; the first four values of each are the
                box [x, y, w, h] in rgb888p (AI-frame) coordinates.
            recg_results: per-box recognition strings from database_search,
                indexed in the same order as dets.
        """
        self.pl.osd_img.clear()
        if dets:
            for i, det in enumerate(dets):
                # (1) Draw the face rectangle, rescaled from the AI frame
                # resolution to the display resolution.
                x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4])
                x1 = x1 * self.display_size[0] // self.rgb888p_size[0]
                y1 = y1 * self.display_size[1] // self.rgb888p_size[1]
                w = w * self.display_size[0] // self.rgb888p_size[0]
                h = h * self.display_size[1] // self.rgb888p_size[1]
                self.pl.osd_img.draw_rectangle(x1, y1, w, h, color=(255, 0, 0, 255), thickness=1)
                # (2) Write the recognition text at the box's top-left corner.
                recg_text = recg_results[i]
                self.pl.osd_img.draw_string_advanced(x1, y1, 32, recg_text, color=(255, 255, 0, 0))