import cv2
import numpy as np
from math import ceil
from itertools import product as product
from rknn.api import RKNN

class RetinaFaceDetector:
    """RetinaFace face detector on the RKNN runtime; accepts raw BGR images.

    With a ``target`` device string the pre-built .rknn model is loaded and
    run on hardware; with ``target=None`` the ONNX model is converted,
    quantized, built and exported, then run on the RKNN simulator.
    """

    def __init__(self, input_size=(320, 320), target='rk3588'):
        """
        Args:
            input_size: (height, width) the model expects.
            target: RKNN platform string (e.g. 'rk3588'), or None to build
                from ONNX and run on the simulator.

        Raises:
            Exception: if the .rknn model fails to load or the device
                runtime fails to initialize (the simulator/build path
                calls ``exit`` on failure instead).
        """
        self.model_path = "./model/RetinaFace_mobile320.rknn"
        self.input_size = input_size
        self.target = target
        self.rknn = RKNN()
        self._load_model()
        # The anchor priors depend only on the input size, so compute them
        # once here instead of on every detect() call.
        self._priors = self._prior_box(self.input_size)

    def _load_model(self):
        """Load the model (device path) or convert/build it (simulator path)."""
        if self.target is not None:
            # Device path: load the pre-converted .rknn model directly.
            ret = self.rknn.load_rknn(self.model_path)
            if ret != 0:
                raise Exception(f"加载模型失败: {self.model_path}")
            ret = self.rknn.init_runtime(target=self.target)
            if ret != 0:
                raise Exception("初始化运行环境失败")
        else:
            # Simulator path: configure conversion parameters, load the
            # ONNX model, quantize/build, run on the simulator, and export
            # the .rknn artifact for later device use.
            ret = self.rknn.config(mean_values=[0.0, 0.0, 0.0], std_values=[1.0, 1.0, 1.0], quantized_dtype='w8a8', target_platform='rk3588')
            if ret != 0:
                print('配置模型失败')
                exit(ret)
            ret = self.rknn.load_onnx(model='./model/RetinaFace_mobile320.onnx')
            if ret != 0:
                print('加载ONNX模型失败')
                exit(ret)
            ret = self.rknn.build(do_quantization=True, dataset='./model/dataset_RetinaFace.txt')
            if ret != 0:
                print('构建模型失败')
                exit(ret)
            # No target given -> init_runtime() runs on the simulator.
            ret = self.rknn.init_runtime()
            if ret != 0:
                print('初始化运行时环境失败')
                exit(ret)
            print('--> Export rknn model')
            ret = self.rknn.export_rknn("./model/RetinaFace_mobile320.rknn")
            if ret != 0:
                print('Export rknn model failed!')
                exit(ret)
            print('done')

    def detect(self, image, score_threshold=0.5, nms_threshold=0.5):
        """Detect faces in a raw BGR image.

        Args:
            image: numpy array, BGR image of any size.
            score_threshold: minimum face confidence kept before NMS.
            nms_threshold: IoU threshold used by NMS.

        Returns:
            numpy array of shape (N, 15), one row per face:
            [x1, y1, x2, y2, score, lm1_x, lm1_y, ..., lm5_x, lm5_y]
            in original-image pixel coordinates; empty array for
            None/empty input.
        """
        if image is None or image.size == 0:
            return np.array([])

        img_height, img_width = image.shape[:2]

        # Letterbox-resize to the model input size, then BGR -> RGB.
        letterbox_img, aspect_ratio, offset_x, offset_y = self._letterbox_resize(image)
        infer_img = letterbox_img[..., ::-1]

        # Model inference (RKNN inference default data_format is 'nhwc').
        loc, conf, landmarks = self.rknn.inference(inputs=[infer_img])

        # Decode box regressions against the cached priors and scale the
        # normalized coordinates to model-input pixels.
        priors = self._priors
        boxes = self._box_decode(loc.squeeze(0), priors)
        boxes = boxes * np.array([self.input_size[1], self.input_size[0]] * 2)

        # Undo the letterbox transform back to original-image coordinates.
        boxes[..., 0::2] = np.clip((boxes[..., 0::2] - offset_x) / aspect_ratio, 0, img_width)
        boxes[..., 1::2] = np.clip((boxes[..., 1::2] - offset_y) / aspect_ratio, 0, img_height)

        scores = conf.squeeze(0)[:, 1]  # column 1 = face-class confidence

        # Decode the five facial landmarks and map them back the same way.
        landmarks = self._decode_landmarks(landmarks.squeeze(0), priors)
        landmarks = landmarks * np.array([self.input_size[1], self.input_size[0]] * 5)
        landmarks[..., 0::2] = np.clip((landmarks[..., 0::2] - offset_x) / aspect_ratio, 0, img_width)
        landmarks[..., 1::2] = np.clip((landmarks[..., 1::2] - offset_y) / aspect_ratio, 0, img_height)

        # Drop low-confidence candidates before NMS.
        inds = np.where(scores > score_threshold)[0]
        boxes = boxes[inds]
        landmarks = landmarks[inds]
        scores = scores[inds]

        # Non-maximum suppression on [x1, y1, x2, y2, score] rows.
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32)
        keep = self._nms(dets, nms_threshold)
        dets = dets[keep, :]
        landmarks = landmarks[keep]

        # Final layout: [x1, y1, x2, y2, score, landmarks...]
        return np.concatenate((dets, landmarks), axis=1)

    def _letterbox_resize(self, image):
        """Resize keeping aspect ratio and pad (gray value 114) to input_size.

        Returns:
            (padded_img, scale, offset_x, offset_y): the padded image, the
            resize factor applied, and the top-left padding in pixels.
        """
        img_height, img_width = image.shape[:2]
        target_height, target_width = self.input_size

        scale = min(target_width / img_width, target_height / img_height)
        new_width = int(img_width * scale)
        new_height = int(img_height * scale)

        resized_img = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)

        # Gray (114) canvas with the resized image centered on it.
        padded_img = np.ones((target_height, target_width, 3), dtype=np.uint8) * 114
        offset_x = (target_width - new_width) // 2
        offset_y = (target_height - new_height) // 2
        padded_img[offset_y:offset_y + new_height, offset_x:offset_x + new_width] = resized_img

        return padded_img, scale, offset_x, offset_y

    def _prior_box(self, image_size):
        """Generate RetinaFace anchor priors for ``image_size`` (h, w).

        Returns:
            numpy array of shape (num_anchors, 4): normalized
            [center_x, center_y, width, height] per anchor.
        """
        min_sizes = [[16, 32], [64, 128], [256, 512]]
        steps = [8, 16, 32]
        feature_maps = [[ceil(image_size[0] / step), ceil(image_size[1] / step)] for step in steps]

        anchors = []
        for k, f in enumerate(feature_maps):
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes[k]:
                    s_kx = min_size / image_size[1]
                    s_ky = min_size / image_size[0]
                    # Anchor center at the middle of cell (i, j), normalized.
                    cx = (j + 0.5) * steps[k] / image_size[1]
                    cy = (i + 0.5) * steps[k] / image_size[0]
                    anchors += [cx, cy, s_kx, s_ky]

        return np.array(anchors).reshape(-1, 4)

    def _box_decode(self, loc, priors):
        """Decode SSD-style box regressions into normalized corner boxes.

        Args:
            loc: (N, 4) raw regression offsets from the model.
            priors: (N, 4) anchors as [cx, cy, w, h].

        Returns:
            (N, 4) boxes as [x1, y1, x2, y2], still normalized.
        """
        variances = [0.1, 0.2]
        boxes = np.concatenate((
            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
            priors[:, 2:] * np.exp(loc[:, 2:] * variances[1])), axis=1)
        # Convert [cx, cy, w, h] -> [x1, y1, x2, y2].
        boxes[:, :2] -= boxes[:, 2:] / 2
        boxes[:, 2:] += boxes[:, :2]
        return boxes

    def _decode_landmarks(self, pre, priors):
        """Decode the five facial landmark offsets against the priors.

        Args:
            pre: (N, 10) raw landmark offsets (five x/y pairs).
            priors: (N, 4) anchors as [cx, cy, w, h].

        Returns:
            (N, 10) normalized landmark coordinates.
        """
        variances = [0.1, 0.2]
        landmarks = np.concatenate((
            priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
            priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
            priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
            priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
            priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]
        ), axis=1)
        return landmarks

    def _nms(self, dets, thresh):
        """Greedy non-maximum suppression.

        Args:
            dets: (N, 5) array of [x1, y1, x2, y2, score] rows.
            thresh: IoU threshold above which lower-scored boxes are dropped.

        Returns:
            list of indices into ``dets`` to keep, highest score first.
        """
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        # Legacy +1 pixel-area convention (matches the decoder's training).
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Intersection of the current best box with all remaining ones.
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)

            # Keep only boxes that overlap the winner at or below thresh.
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]

        return keep

    def release(self):
        """Release RKNN resources; safe to call more than once."""
        if self.rknn:
            self.rknn.release()
            # Clear the handle so a second release() is a no-op.
            self.rknn = None
