"""
YOLOv8文本区域检测模块
使用OpenVINO优化的YOLOv8模型进行文本区域检测
"""

import cv2
import numpy as np
import time
from typing import List, Tuple, Optional, Dict, Any
from dataclasses import dataclass
from pathlib import Path
import openvino as ov
from ultralytics import YOLO
import onnxruntime as ort

from ..core import config, detection_logger


@dataclass
class Detection:
    """A single detected region (one bounding box with its score and class)."""
    bbox: Tuple[int, int, int, int]  # pixel corners: x1, y1, x2, y2
    confidence: float  # detection score for this box
    class_id: int  # index into the detector's class_names list
    class_name: str  # human-readable label (falls back to "unknown" for out-of-range ids)

@dataclass
class DetectionResult:
    """All detections produced by one call to YOLODetector.detect, plus timing."""
    detections: List[Detection]  # post-processed detections (empty on failure)
    inference_time: float  # seconds spent in detect(), including pre/post-processing
    image_shape: Tuple[int, int]  # original image size as (height, width)
    timestamp: float  # epoch seconds when the result was produced

class YOLODetector:
    """YOLOv8 text-region detector.

    The inference backend is chosen from the model file extension:

    * ``.onnx``        -- OpenVINO (preferred), falling back to ONNX Runtime
    * ``.pt``/``.pth`` -- Ultralytics YOLO

    Thresholds, input size and target device come from
    ``config.yolo_detection``.
    """

    def __init__(self, model_path: Optional[str] = None):
        """Initialize the detector and load the model.

        Args:
            model_path: Path to the model file; falls back to
                ``config.yolo_detection.model_path`` when omitted.

        Raises:
            FileNotFoundError: If the model file does not exist.
            ValueError: If the model file extension is unsupported.
        """
        self.model_path = model_path or config.yolo_detection.model_path
        self.confidence_threshold = config.yolo_detection.confidence_threshold
        self.iou_threshold = config.yolo_detection.iou_threshold
        self.max_detections = config.yolo_detection.max_detections_per_frame
        # Unpacked as (height, width) in preprocess().
        self.input_size = tuple(config.yolo_detection.input_size)
        self.device = config.yolo_detection.openvino_device

        # Exactly one backend handle ends up populated by _load_model().
        self.model = None            # ONNX Runtime session or Ultralytics model
        self.compiled_model = None   # OpenVINO compiled model
        self.input_layer = None
        self.output_layer = None
        self.class_names = ["text"]  # text detection uses a single class

        self._load_model()
        detection_logger.info("YOLOv8检测器初始化完成")

    def _load_model(self):
        """Dispatch model loading based on the file extension."""
        try:
            model_file = Path(self.model_path)
            if not model_file.exists():
                detection_logger.error(f"模型文件不存在: {self.model_path}")
                raise FileNotFoundError(f"模型文件不存在: {self.model_path}")

            # Pick a loader by extension.
            if model_file.suffix.lower() == '.onnx':
                self._load_onnx_model()
            elif model_file.suffix.lower() in ['.pt', '.pth']:
                self._load_pytorch_model()
            else:
                detection_logger.error(f"不支持的模型格式: {model_file.suffix}")
                raise ValueError(f"不支持的模型格式: {model_file.suffix}")

            detection_logger.info(f"成功加载模型: {self.model_path}")

        except Exception as e:
            detection_logger.error(f"加载模型失败: {e}")
            raise

    def _load_onnx_model(self):
        """Load an ONNX model through OpenVINO; fall back to ONNX Runtime."""
        try:
            core = ov.Core()
            model = core.read_model(self.model_path)

            # Compile for the configured device (e.g. CPU/GPU/AUTO).
            self.compiled_model = core.compile_model(model, self.device)

            self.input_layer = self.compiled_model.input(0)
            self.output_layer = self.compiled_model.output(0)

            detection_logger.info(f"使用OpenVINO加载ONNX模型，设备: {self.device}")
            detection_logger.info(f"输入形状: {self.input_layer.shape}")
            detection_logger.info(f"输出形状: {self.output_layer.shape}")

        except Exception as e:
            # OpenVINO may be unavailable or the device unsupported; ORT is the fallback.
            detection_logger.warning(f"OpenVINO加载失败，尝试使用ONNX Runtime: {e}")
            self._load_onnx_with_ort()

    def _load_onnx_with_ort(self):
        """Load the ONNX model with ONNX Runtime.

        Raises:
            Exception: Propagates any ONNX Runtime loading error.
        """
        try:
            # CPU is always available; prefer CUDA when the config asks for GPU.
            providers = ['CPUExecutionProvider']
            if self.device.upper() == 'GPU':
                providers.insert(0, 'CUDAExecutionProvider')

            self.model = ort.InferenceSession(self.model_path, providers=providers)

            self.input_layer = self.model.get_inputs()[0]
            self.output_layer = self.model.get_outputs()[0]

            detection_logger.info(f"使用ONNX Runtime加载模型")
            detection_logger.info(f"输入形状: {self.input_layer.shape}")
            detection_logger.info(f"输出形状: {self.output_layer.shape}")

        except Exception as e:
            detection_logger.error(f"ONNX Runtime加载失败: {e}")
            raise

    def _load_pytorch_model(self):
        """Load a PyTorch checkpoint through the Ultralytics YOLO API."""
        try:
            self.model = YOLO(self.model_path)
            detection_logger.info("使用Ultralytics YOLO加载PyTorch模型")

        except Exception as e:
            detection_logger.error(f"PyTorch模型加载失败: {e}")
            raise

    def preprocess(self, image: np.ndarray) -> Tuple[np.ndarray, float, float]:
        """Resize and normalize an image into the model's NCHW input tensor.

        Args:
            image: HxWxC image. NOTE(review): channel order is passed through
                unchanged (OpenCV images are BGR) -- confirm the model was
                exported to accept BGR, otherwise an RGB swap is needed here.

        Returns:
            Tuple of (tensor with shape (1, C, H, W) in [0, 1], scale_x,
            scale_y), where the scales map model-input coordinates back to
            the original image.
        """
        original_height, original_width = image.shape[:2]

        input_height, input_width = self.input_size
        resized_image = cv2.resize(image, (input_width, input_height))

        # Scale pixel values to [0, 1].
        input_image = resized_image.astype(np.float32) / 255.0

        # HWC -> CHW, then add the batch dimension.
        input_image = np.transpose(input_image, (2, 0, 1))
        input_image = np.expand_dims(input_image, axis=0)

        # Factors that map model-input coordinates back to the original image.
        scale_x = original_width / input_width
        scale_y = original_height / input_height

        return input_image, scale_x, scale_y

    def postprocess(self, outputs: np.ndarray, scale_x: float, scale_y: float,
                   original_shape: Tuple[int, int]) -> List[Detection]:
        """Decode raw model output into Detection objects.

        Assumes a YOLOv5-style row layout: (cx, cy, w, h, objectness,
        class scores...). NOTE(review): a raw YOLOv8 ONNX export emits
        (batch, 4 + num_classes, num_boxes) with no objectness column --
        confirm the exported model actually matches this decoder.

        Args:
            outputs: Raw network output, optionally with a batch dimension.
            scale_x: Horizontal factor mapping input coords to the original image.
            scale_y: Vertical factor mapping input coords to the original image.
            original_shape: Original image (height, width), used for clipping.

        Returns:
            Detections surviving thresholding and NMS, at most
            ``max_detections`` of them.
        """
        detections = []

        # Drop the batch dimension if present.
        if len(outputs.shape) == 3:
            outputs = outputs[0]

        # Discard rows below the objectness threshold.
        confidences = outputs[:, 4]
        valid_indices = confidences > self.confidence_threshold
        valid_outputs = outputs[valid_indices]

        if len(valid_outputs) == 0:
            return detections

        boxes = valid_outputs[:, :4]
        confidences = valid_outputs[:, 4]

        # With per-class scores, final score = objectness * best class score.
        if valid_outputs.shape[1] > 5:
            class_scores = valid_outputs[:, 5:]
            class_ids = np.argmax(class_scores, axis=1)
            class_confidences = np.max(class_scores, axis=1)
            confidences = confidences * class_confidences
        else:
            class_ids = np.zeros(len(boxes), dtype=int)

        # (cx, cy, w, h) -> (x1, y1, x2, y2), scaled to the original image.
        x_center, y_center, width, height = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
        x1 = (x_center - width / 2) * scale_x
        y1 = (y_center - height / 2) * scale_y
        x2 = (x_center + width / 2) * scale_x
        y2 = (y_center + height / 2) * scale_y

        # Clip boxes to the image bounds.
        original_height, original_width = original_shape
        x1 = np.clip(x1, 0, original_width)
        y1 = np.clip(y1, 0, original_height)
        x2 = np.clip(x2, 0, original_width)
        y2 = np.clip(y2, 0, original_height)

        # FIX: cv2.dnn.NMSBoxes expects (x, y, width, height) rectangles, not
        # corner pairs -- feeding xyxy silently computes wrong overlaps.
        boxes_for_nms = np.column_stack([x1, y1, x2 - x1, y2 - y1])
        indices = cv2.dnn.NMSBoxes(
            boxes_for_nms.tolist(),
            confidences.tolist(),
            self.confidence_threshold,
            self.iou_threshold
        )

        if len(indices) > 0:
            indices = indices.flatten()

            # Keep only the top-N survivors by confidence.
            if len(indices) > self.max_detections:
                sorted_indices = indices[np.argsort(confidences[indices])[::-1]]
                indices = sorted_indices[:self.max_detections]

            for i in indices:
                detection = Detection(
                    bbox=(int(x1[i]), int(y1[i]), int(x2[i]), int(y2[i])),
                    confidence=float(confidences[i]),
                    class_id=int(class_ids[i]),
                    class_name=self.class_names[class_ids[i]] if class_ids[i] < len(self.class_names) else "unknown"
                )
                detections.append(detection)

        return detections

    def detect(self, image: np.ndarray) -> DetectionResult:
        """Run detection on a single image.

        Never raises: any failure is logged and an empty DetectionResult is
        returned instead (best-effort contract).

        Args:
            image: HxWxC image to run the detector on.

        Returns:
            DetectionResult with post-processed, merged detections and timing.
        """
        start_time = time.time()

        try:
            input_image, scale_x, scale_y = self.preprocess(image)
            original_shape = image.shape[:2]

            # Dispatch to whichever backend was successfully loaded.
            if self.compiled_model:  # OpenVINO
                outputs = self.compiled_model([input_image])[self.output_layer]
            elif hasattr(self.model, 'run'):  # ONNX Runtime
                input_name = self.input_layer.name
                outputs = self.model.run(None, {input_name: input_image})[0]
            else:  # Ultralytics YOLO does its own pre/post-processing
                results = self.model(image, conf=self.confidence_threshold, iou=self.iou_threshold)
                return self._parse_ultralytics_results(results[0], start_time)

            detections = self.postprocess(outputs, scale_x, scale_y, original_shape)

            # Collapse near-duplicate boxes that survived NMS.
            detections = self.merge_overlapping_boxes(detections, self.iou_threshold)

            inference_time = time.time() - start_time

            return DetectionResult(
                detections=detections,
                inference_time=inference_time,
                image_shape=original_shape,
                timestamp=time.time()
            )

        except Exception as e:
            detection_logger.error(f"检测失败: {e}")
            return DetectionResult(
                detections=[],
                inference_time=time.time() - start_time,
                image_shape=image.shape[:2],
                timestamp=time.time()
            )

    def _parse_ultralytics_results(self, result, start_time: float) -> DetectionResult:
        """Convert an Ultralytics result object into a DetectionResult.

        Args:
            result: A single Ultralytics result (``results[0]``).
            start_time: Wall-clock start of detect(), used for timing.
        """
        detections = []

        if result.boxes is not None:
            boxes = result.boxes.xyxy.cpu().numpy()
            confidences = result.boxes.conf.cpu().numpy()
            class_ids = result.boxes.cls.cpu().numpy().astype(int)

            for i, (box, conf, cls_id) in enumerate(zip(boxes, confidences, class_ids)):
                # Cap the number of detections returned.
                if i >= self.max_detections:
                    break

                detection = Detection(
                    bbox=(int(box[0]), int(box[1]), int(box[2]), int(box[3])),
                    confidence=float(conf),
                    class_id=int(cls_id),
                    class_name=self.class_names[cls_id] if cls_id < len(self.class_names) else "unknown"
                )
                detections.append(detection)

        inference_time = time.time() - start_time

        return DetectionResult(
            detections=detections,
            inference_time=inference_time,
            image_shape=result.orig_shape,
            timestamp=time.time()
        )

    def merge_overlapping_boxes(self, detections: List[Detection],
                               iou_threshold: float = 0.7) -> List[Detection]:
        """Greedily merge detections whose IoU is >= ``iou_threshold``.

        Operates on a sorted copy so the caller's list is never reordered
        (the original implementation sorted the argument in place).

        Args:
            detections: Detections to merge.
            iou_threshold: Minimum IoU for two boxes to be clustered.

        Returns:
            A new list where each cluster of overlapping boxes is replaced by
            one confidence-weighted box; singletons pass through unchanged.
        """
        if len(detections) <= 1:
            return detections

        # Sort a copy by confidence (highest first) instead of mutating the input.
        ordered = sorted(detections, key=lambda d: d.confidence, reverse=True)

        merged = []
        used = [False] * len(ordered)

        for i, det1 in enumerate(ordered):
            if used[i]:
                continue

            # Collect every not-yet-used box overlapping the anchor box.
            overlapping = [det1]
            used[i] = True

            for j, det2 in enumerate(ordered[i+1:], i+1):
                if used[j]:
                    continue

                iou = self._calculate_iou(det1.bbox, det2.bbox)
                if iou >= iou_threshold:
                    overlapping.append(det2)
                    used[j] = True

            if len(overlapping) > 1:
                merged.append(self._merge_boxes(overlapping))
            else:
                merged.append(det1)

        return merged

    def _calculate_iou(self, box1: Tuple[int, int, int, int],
                      box2: Tuple[int, int, int, int]) -> float:
        """Return the intersection-over-union of two (x1, y1, x2, y2) boxes."""
        x1_1, y1_1, x2_1, y2_1 = box1
        x1_2, y1_2, x2_2, y2_2 = box2

        # Intersection rectangle corners.
        x1_inter = max(x1_1, x1_2)
        y1_inter = max(y1_1, y1_2)
        x2_inter = min(x2_1, x2_2)
        y2_inter = min(y2_1, y2_2)

        # No (or degenerate) overlap.
        if x2_inter <= x1_inter or y2_inter <= y1_inter:
            return 0.0

        inter_area = (x2_inter - x1_inter) * (y2_inter - y1_inter)

        area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
        area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
        union_area = area1 + area2 - inter_area

        return inter_area / union_area if union_area > 0 else 0.0

    def _merge_boxes(self, detections: List[Detection]) -> Detection:
        """Fuse several detections into one confidence-weighted box.

        The merged bbox is the confidence-weighted mean of the corners; the
        class and confidence are taken from the strongest input detection.
        """
        total_confidence = sum(det.confidence for det in detections)
        best_detection = max(detections, key=lambda x: x.confidence)

        # Guard against an all-zero-confidence cluster (weighted mean undefined).
        if total_confidence <= 0:
            return best_detection

        x1 = sum(det.bbox[0] * det.confidence for det in detections) / total_confidence
        y1 = sum(det.bbox[1] * det.confidence for det in detections) / total_confidence
        x2 = sum(det.bbox[2] * det.confidence for det in detections) / total_confidence
        y2 = sum(det.bbox[3] * det.confidence for det in detections) / total_confidence

        return Detection(
            bbox=(int(x1), int(y1), int(x2), int(y2)),
            confidence=best_detection.confidence,
            class_id=best_detection.class_id,
            class_name=best_detection.class_name
        )
