"""
YOLO人脸检测实现
基于YOLO11模型的人脸检测，包含后处理、NMS等功能
"""
import cv2
import numpy as np
from scipy.special import softmax
import logging
import time
from typing import List, Tuple, Optional

from utils.base_model import BaseModel

logger = logging.getLogger(__name__)


class YOLO11ShapeDetect(BaseModel):
    """
    YOLO11 object-detection model class. Inherits from BaseModel and is
    responsible for decoding raw inference outputs, post-processing, and NMS.
    """
    
    def __init__(self,
                 model_file: str,
                 conf: float = 0.6,
                 iou: float = 0.45):
        """
        Initialize the YOLO11 object detector.

        Args:
            model_file (str): Path to the model file.
            conf (float): Confidence threshold for keeping detections.
            iou (float): IoU threshold used by NMS.
        """
        super().__init__(model_file)

        # Detection thresholds. ``conf_inverse`` is the logit of ``conf`` so
        # raw (pre-sigmoid) scores can be filtered without first applying
        # sigmoid to every candidate.
        self.conf = conf
        self.iou = iou
        self.conf_inverse = -np.log(1 / conf - 1)

        # Model configuration.
        self.input_image_size = 320
        self.dfl_bins = 16

        # Names for the three detectable classes.
        self.class_names = ['orange', 'green', 'red']

        # Running statistics, updated after each detection call.
        self.stats = {
            'total_detections': 0,
            'total_inference_time': 0.0,
            'objects_detected': 0
        }

        # Precompute DFL weights and per-scale anchor grids.
        self._prepare_static_data()

        logger.info(f"YOLO11物体检测器初始化完成")
        logger.info(f"置信度阈值: {conf}, IOU阈值: {iou}")
        logger.info(f"检测类别: {self.class_names}")
    
    def _prepare_static_data(self):
        """Precompute DFL expectation weights and anchor-center grids.

        Grid sizes are derived from ``self.input_image_size`` and the standard
        YOLO strides (8/16/32) instead of being hard-coded, so changing the
        input resolution automatically yields matching anchor layouts.
        For the default 320 input this produces 40x40, 20x20 and 10x10 grids.
        """
        # Coefficients for taking the expectation over the DFL distribution.
        self.weights_static = np.arange(self.dfl_bins, dtype=np.float32)[np.newaxis, np.newaxis, :]

        # Anchor centers for the three feature-map scales.
        self.s_anchor = self._make_anchor_grid(self.input_image_size // 8)   # stride 8
        self.m_anchor = self._make_anchor_grid(self.input_image_size // 16)  # stride 16
        self.l_anchor = self._make_anchor_grid(self.input_image_size // 32)  # stride 32

        logger.debug(f"Anchor shapes: s={self.s_anchor.shape}, m={self.m_anchor.shape}, l={self.l_anchor.shape}")

    @staticmethod
    def _make_anchor_grid(grid_size: int) -> np.ndarray:
        """Return a (grid_size**2, 2) array of (x, y) cell centers.

        Row-major scan order: x varies fastest, matching the layout of the
        flattened model output heads.
        """
        centers = np.arange(grid_size, dtype=np.float64) + 0.5
        return np.stack([
            np.tile(centers, reps=grid_size),   # x coordinates (fastest varying)
            np.repeat(centers, grid_size)       # y coordinates
        ], axis=0).transpose(1, 0)
    
    def detect_shapes(self, frame: np.ndarray) -> Tuple[List[List[int]], List[int], List[float], float, int]:
        """
        Run object detection on a single frame.

        Args:
            frame: Input image frame (BGR).

        Returns:
            Tuple of (bounding-box list, class-id list, confidence list,
            inference time in seconds, detected object count).
        """
        t0 = time.time()

        try:
            # Preprocess (BGR -> NV12), run inference, convert outputs to numpy.
            tensor = self.bgr2nv12(frame)
            raw_outputs = self.forward(tensor)
            numpy_outputs = self.c2numpy(raw_outputs)

            # Empty outputs indicate CPU-simulation mode or a failed inference.
            if not numpy_outputs:
                return self._simulate_detection(frame)

            class_ids, confidences, bboxes = self.postProcess(numpy_outputs)

            boxes_out: List[List[int]] = []
            ids_out: List[int] = []
            confs_out: List[float] = []
            if bboxes is not None:
                for idx, box in enumerate(bboxes):
                    boxes_out.append([int(v) for v in box])
                    # Pad with defaults if the parallel lists are shorter.
                    ids_out.append(int(class_ids[idx]) if idx < len(class_ids) else 0)
                    confs_out.append(float(confidences[idx]) if idx < len(confidences) else 0.0)

            elapsed = time.time() - t0
            count = len(boxes_out)

            self._update_stats(elapsed, count)
            logger.debug(f"检测到 {count} 个物体，推理时间: {elapsed:.3f}s")

            return boxes_out, ids_out, confs_out, elapsed, count

        except Exception as e:
            logger.error(f"物体检测失败: {e}")
            return [], [], [], 0.0, 0
    
    def _simulate_detection(self, frame: np.ndarray) -> Tuple[List[List[int]], List[int], List[float], float, int]:
        """CPU-only mock detection (development/testing only)."""
        time.sleep(0.02)  # fake inference latency
        height, width = frame.shape[:2]

        boxes: List[List[int]] = []
        ids: List[int] = []
        confs: List[float] = []

        # Emit a single centered box with 50% probability.
        if np.random.random() > 0.5:
            center_x, center_y = width // 2, height // 2
            box_w, box_h = 120, 150
            left = max(0, center_x - box_w // 2)
            top = max(0, center_y - box_h // 2)
            right = min(width, center_x + box_w // 2)
            bottom = min(height, center_y + box_h // 2)
            boxes.append([left, top, right, bottom])
            ids.append(np.random.randint(0, 3))            # random class id in [0, 2]
            confs.append(0.85 + np.random.random() * 0.1)  # confidence in [0.85, 0.95)

        return boxes, ids, confs, 0.02, len(boxes)
    
    def postProcess(self, outputs: List[np.ndarray]) -> Tuple[List, List, List]:
        """
        Post-process raw model outputs: decode DFL boxes, filter by
        confidence, run NMS, and rescale boxes to the original image size.

        Args:
            outputs (List[np.ndarray]): The six raw output tensors —
                three bbox heads (s/m/l) followed by three class heads (s/m/l).

        Returns:
            Tuple: (class ids, confidences, bounding boxes); all empty when
            nothing survives filtering/NMS or on error.
        """
        begin_time = time.time()

        if len(outputs) < 6:
            logger.warning("模型输出格式不正确，期望6个输出")
            return [], [], []

        try:
            # Decode each scale with its stride; head order is small/medium/large.
            decoded = [
                self._decode_scale(outputs[0].reshape(-1, 64), outputs[3].reshape(-1, 3), self.s_anchor, 8),
                self._decode_scale(outputs[1].reshape(-1, 64), outputs[4].reshape(-1, 3), self.m_anchor, 16),
                self._decode_scale(outputs[2].reshape(-1, 64), outputs[5].reshape(-1, 3), self.l_anchor, 32),
            ]

            ids = np.concatenate([d[0] for d in decoded])
            scores = np.concatenate([d[1] for d in decoded])
            dbboxes = np.concatenate([d[2] for d in decoded], axis=0)

            if len(dbboxes) == 0:
                return [], [], []

            # NOTE(review): cv2.dnn.NMSBoxes documents boxes as (x, y, w, h);
            # (x1, y1, x2, y2) is passed here, as in the original code —
            # confirm which format the deployed OpenCV build expects.
            indices = cv2.dnn.NMSBoxes(dbboxes, scores, self.conf, self.iou)
            if len(indices) == 0:
                return [], [], []

            # Rescale surviving boxes back to original-image coordinates.
            scale = np.array([self.x_scale, self.y_scale, self.x_scale, self.y_scale])
            bboxes = (dbboxes[indices] * scale).astype(np.int32)

            process_time = time.time() - begin_time
            logger.debug(f"后处理耗时: {process_time*1000:.2f}ms")

            return ids[indices], scores[indices], bboxes

        except Exception as e:
            logger.error(f"后处理失败: {e}")
            return [], [], []

    def _decode_scale(self, bbox_raw: np.ndarray, cls_raw: np.ndarray,
                      anchor: np.ndarray, stride: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Decode one detection scale (shared by the s/m/l heads).

        Filters candidates against the pre-sigmoid threshold, applies sigmoid
        to the surviving scores, takes the DFL expectation for the l/t/r/b
        offsets, and converts anchor centers + offsets into x1y1x2y2 boxes in
        input-image pixels.

        Args:
            bbox_raw: (N, 64) raw DFL logits (4 sides x 16 bins).
            cls_raw:  (N, num_classes) raw class logits.
            anchor:   (N, 2) cell-center coordinates for this scale.
            stride:   Feature-map stride in pixels.

        Returns:
            (class ids, sigmoid scores, boxes) — all possibly empty arrays.
        """
        max_scores = np.max(cls_raw, axis=1)
        # Compare raw logits against the logit of the confidence threshold so
        # sigmoid only needs to run on the (few) survivors.
        valid = np.flatnonzero(max_scores >= self.conf_inverse)

        ids = np.argmax(cls_raw[valid, :], axis=1)
        scores = 1 / (1 + np.exp(-max_scores[valid]))

        # DFL: expectation over the 16-bin distribution for each of l/t/r/b.
        ltrb = np.sum(softmax(bbox_raw[valid, :].reshape(-1, 4, self.dfl_bins), axis=2) * self.weights_static, axis=2)
        centers = anchor[valid, :]
        boxes = np.hstack([centers - ltrb[:, 0:2], centers + ltrb[:, 2:4]]) * stride
        return ids, scores, boxes
    
    def _update_stats(self, inference_time: float, object_count: int):
        """Record statistics for one completed detection call."""
        stats = self.stats
        stats['total_detections'] += 1
        stats['total_inference_time'] += inference_time
        # NOTE(review): holds the count from the most recent frame only, not a
        # running total — confirm that is the intended meaning of the key name.
        stats['objects_detected'] = object_count
        stats['last_inference_time'] = inference_time
    
    def get_stats(self) -> dict:
        """Return a copy of the detection statistics.

        Adds 'average_inference_time' once at least one detection has run.
        """
        total = self.stats['total_detections']
        if total > 0:
            self.stats['average_inference_time'] = self.stats['total_inference_time'] / total
        return self.stats.copy()
    
    # BGR colors indexed by class id: orange, green, red (shared by all calls).
    _CLASS_COLORS = [(0, 165, 255), (0, 255, 0), (0, 0, 255)]

    def draw_detections(self, frame: np.ndarray, shape_boxes: List[List[int]],
                       class_ids: Optional[List[int]] = None,
                       confidences: Optional[List[float]] = None,
                       show_boxes: bool = True,
                       show_center: bool = True,
                       show_stats: bool = True) -> np.ndarray:
        """
        Draw detection results onto a copy of the frame.

        Args:
            frame: Input image.
            shape_boxes: List of object bounding boxes (x1, y1, x2, y2).
            class_ids: Class id per box (defaults to 0 when missing).
            confidences: Confidence per box (defaults to 0.0 when missing).
            show_boxes: Draw the detection rectangles and labels.
            show_center: Draw per-box centers and the frame crosshair.
            show_stats: Overlay the statistics text line.

        Returns:
            np.ndarray: The annotated copy of the frame.
        """
        result_frame = frame.copy()

        # Nothing detected: optionally overlay the stats line and return
        # (the frame crosshair is intentionally not drawn in this case,
        # matching the original behavior).
        if not shape_boxes:
            if show_stats:
                self._put_stats_text(result_frame, 0)
            return result_frame

        if show_boxes:
            for i, bbox in enumerate(shape_boxes):
                # Fall back to defaults when the parallel lists are shorter.
                class_id = class_ids[i] if class_ids and i < len(class_ids) else 0
                confidence = confidences[i] if confidences and i < len(confidences) else 0.0
                self._draw_box(result_frame, bbox, class_id, confidence, show_center)

        if show_center:
            self._draw_crosshair(result_frame)

        if show_stats:
            self._put_stats_text(result_frame, len(shape_boxes))

        return result_frame

    def _draw_box(self, img: np.ndarray, bbox: List[int], class_id: int,
                  confidence: float, draw_center: bool) -> None:
        """Draw one bounding box with its label and optional center dot."""
        x1, y1, x2, y2 = bbox
        color = self._CLASS_COLORS[class_id % len(self._CLASS_COLORS)]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

        # Filled label background plus "name: confidence" text.
        class_name = self.class_names[class_id] if class_id < len(self.class_names) else f"Class {class_id}"
        label = f"{class_name}: {confidence:.2f}"
        label_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)[0]
        cv2.rectangle(img, (x1, y1 - label_size[1] - 10), (x1 + label_size[0], y1), color, -1)
        cv2.putText(img, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)

        if draw_center:
            cv2.circle(img, ((x1 + x2) // 2, (y1 + y2) // 2), 5, (0, 0, 255), -1)

    def _draw_crosshair(self, img: np.ndarray) -> None:
        """Draw the blue crosshair marking the frame center."""
        h, w = img.shape[:2]
        cv2.circle(img, (w // 2, h // 2), 8, (255, 0, 0), 2)
        cv2.line(img, (w // 2 - 15, h // 2), (w // 2 + 15, h // 2), (255, 0, 0), 2)
        cv2.line(img, (w // 2, h // 2 - 15), (w // 2, h // 2 + 15), (255, 0, 0), 2)

    def _put_stats_text(self, img: np.ndarray, count: int) -> None:
        """Overlay the object count and last inference time (single source
        for the stats line, previously duplicated)."""
        text = f"Objects: {count}, Time: {self.stats.get('last_inference_time', 0):.3f}s"
        cv2.putText(img, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)