
from utils import letterbox, non_max_suppression, scale_coords
import torch
import cv2
import onnxruntime
from typing import List, Tuple, Union
import numpy as np


class Yolov5SCatDetector:
    def __init__(self,
                 onnx_path: str,
                 device: str = 'auto',
                 conf_thres: float = 0.25,
                 iou_thres: float = 0.45):
        """
        Initializes YOLOv5 cat detector with ONNX runtime.

        Args:
            onnx_path: Path to ONNX model file
            device: Compute device ('auto', 'cuda' or 'cpu')
            conf_thres: Confidence threshold [0-1]
            iou_thres: IoU threshold for NMS [0-1]
        """
        # Resolve compute device; 'auto' prefers CUDA when available.
        if device == 'auto':
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            self.device = device.lower()

        # Initialize ONNX runtime session; ORT silently falls back to CPU
        # if the CUDA provider cannot be loaded.
        providers = (['CUDAExecutionProvider', 'CPUExecutionProvider']
                     if self.device == 'cuda' else ['CPUExecutionProvider'])
        self.session = onnxruntime.InferenceSession(onnx_path, providers=providers)

        # Get model metadata (single input / single output assumed).
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name
        self.input_shape = self.session.get_inputs()[0].shape
        self.output_shape = self.session.get_outputs()[0].shape

        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        # Assuming square NCHW input: img_size comes from dim 2 (height).
        # Models exported with dynamic axes report a string (e.g. 'height')
        # instead of an int; fall back to the YOLOv5 default of 640 rather
        # than crashing later in _preprocess.
        dim = self.input_shape[2]
        self.img_size: int = dim if isinstance(dim, int) else 640

        print("YOLOv5 ONNX cat detector loaded successfully!")
        print(f"Input shape: {self.input_shape}")
        print(f"Output shape: {self.output_shape}")

    def set_threshold(self, conf_thres: float, iou_thres: float):
        """Update the confidence and IoU thresholds used by detect()."""
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres

    def detect(self,
               cv2img: np.ndarray,
               classes: Union[List[int], None] = None) -> Union[torch.Tensor, None]:
        """
        Performs cat detection on input image using ONNX runtime.

        Args:
            cv2img: Input image in OpenCV BGR format
            classes: List of class IDs to filter (None for all classes)

        Returns:
            Tensor: Detected bounding boxes in format [x1,y1,x2,y2,conf,cls]
                   or None if nothing is detected
        """
        # Preprocess image (letterbox to model input size)
        img, ratio, (dw, dh) = self._preprocess(cv2img)

        # Run inference
        outputs = self.session.run(
            [self.output_name],
            {self.input_name: img}
        )[0]  # [batch, num_detections, (x,y,w,h,conf,cls...)]

        # Post-process: NMS + map coordinates back to the original image
        pred = self._postprocess(outputs, cv2img.shape, ratio, (dw, dh), classes)

        return pred

    def _preprocess(self, img: np.ndarray) -> Tuple[np.ndarray, float, Tuple[int, int]]:
        """
        Generic letterbox preprocessing: adapts to any input size while
        preserving aspect ratio via padding.

        Args:
            img: Input image (H, W, 3), BGR channel order.

        Returns:
            processed_image: Normalized [1, 3, img_size, img_size] float32 array
            scale: Scale factor applied (original -> img_size)
            (dw, dh): Left/top padding in pixels (used to undo the
                      letterbox during post-processing)
        """
        # Original dimensions
        h, w = img.shape[:2]

        # Compute scale factor (preserve aspect ratio)
        scale = min(self.img_size / h, self.img_size / w)
        new_h, new_w = int(h * scale), int(w * scale)

        # Resize image
        resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

        # Create padded canvas (114 matches the YOLO training convention)
        canvas = np.full((self.img_size, self.img_size, 3), 114, dtype=np.uint8)

        # Compute padding offsets (centered)
        dh, dw = (self.img_size - new_h) // 2, (self.img_size - new_w) // 2
        canvas[dh:dh + new_h, dw:dw + new_w] = resized

        # Convert to model input layout
        canvas = canvas[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
        canvas = np.ascontiguousarray(canvas, dtype=np.float32) / 255.0
        canvas = np.expand_dims(canvas, axis=0)  # [1, 3, img_size, img_size]

        return canvas, scale, (dw, dh)

    def _postprocess(self,
                    outputs: np.ndarray,
                    img_shape: Tuple[int, int],
                    ratio: Union[float, Tuple[float, float]],
                    pad: Tuple[float, float],
                    classes: Union[List[int], None] = None) -> Union[torch.Tensor, None]:
        """
        Apply NMS and map box coordinates from letterboxed model space back
        to the original image.

        Args:
            outputs: Raw model output [batch, num_detections, ...]
            img_shape: Original image shape (H, W[, C])
            ratio: Scale factor(s) used in preprocessing; a single float
                   or a (w_ratio, h_ratio) pair
            pad: (dw, dh) letterbox padding from preprocessing
            classes: Optional class-ID filter forwarded to NMS

        Returns:
            Tensor [n, 6] of [x1,y1,x2,y2,conf,cls] or None if no detections.
        """
        pred = torch.from_numpy(outputs)

        # Apply NMS (returns a list of per-image tensors)
        pred = non_max_suppression(
            pred,
            conf_thres=self.conf_thres,
            iou_thres=self.iou_thres,
            classes=classes,
            agnostic=False,
            max_det=1000
        )

        # Empty-tensor check: no detections survived NMS
        if pred[0].shape[0] == 0:
            return None

        # Normalize ratio format to per-axis factors
        if isinstance(ratio, tuple):
            w_ratio, h_ratio = ratio
        else:
            w_ratio = h_ratio = ratio

        det = pred[0]

        # Undo letterbox: remove padding, then rescale to original image
        det[:, [0, 2]] = (det[:, [0, 2]] - pad[0]) / w_ratio
        det[:, [1, 3]] = (det[:, [1, 3]] - pad[1]) / h_ratio

        # Clamp boxes to image bounds
        det[:, [0, 2]] = torch.clamp(det[:, [0, 2]], 0, img_shape[1])
        det[:, [1, 3]] = torch.clamp(det[:, [1, 3]], 0, img_shape[0])

        return det

class Yolov5SCatFaceDetector:
    def __init__(self,
                 onnx_path: str,
                 device: str = 'auto',
                 conf_thres: float = 0.25,
                 iou_thres: float = 0.45):
        """
        Initializes YOLOv5 cat face detector with ONNX runtime.

        Args:
            onnx_path: Path to ONNX model file
            device: Compute device ('auto', 'cuda' or 'cpu')
            conf_thres: Confidence threshold [0-1]
            iou_thres: IoU threshold for NMS [0-1]
        """
        # Resolve compute device; 'auto' prefers CUDA when available.
        if device == 'auto':
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            self.device = device.lower()

        # Initialize ONNX runtime session; ORT silently falls back to CPU
        # if the CUDA provider cannot be loaded.
        providers = (['CUDAExecutionProvider', 'CPUExecutionProvider']
                     if self.device == 'cuda' else ['CPUExecutionProvider'])
        self.session = onnxruntime.InferenceSession(onnx_path, providers=providers)

        # Get model metadata (single input / single output assumed).
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name
        self.input_shape = self.session.get_inputs()[0].shape
        self.output_shape = self.session.get_outputs()[0].shape

        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        # Assuming square NCHW input: img_size comes from dim 2 (height).
        # Models exported with dynamic axes report a string (e.g. 'height')
        # instead of an int; fall back to the YOLOv5 default of 640 rather
        # than crashing later in _preprocess.
        dim = self.input_shape[2]
        self.img_size: int = dim if isinstance(dim, int) else 640

        print("YOLOv5 ONNX cat face detector loaded successfully!")
        print(f"Input shape: {self.input_shape}")
        print(f"Output shape: {self.output_shape}")

    def set_threshold(self, conf_thres: float, iou_thres: float):
        """Update the confidence and IoU thresholds used by detect()."""
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres

    def detect(self,
               cv2img: np.ndarray,
               classes: Union[List[int], None] = None) -> Union[torch.Tensor, None]:
        """
        Performs cat face detection on input image using ONNX runtime.

        Args:
            cv2img: Input image in OpenCV BGR format
            classes: List of class IDs to filter (None for all classes)

        Returns:
            Tensor: Detected bounding boxes in format [x1,y1,x2,y2,conf,cls]
                   or None if nothing is detected
        """
        # Preprocess image (letterbox to model input size)
        img, ratio, (dw, dh) = self._preprocess(cv2img)

        # Run inference
        outputs = self.session.run(
            [self.output_name],
            {self.input_name: img}
        )[0]  # [batch, num_detections, (x,y,w,h,conf,cls...)]

        # Post-process: NMS + map coordinates back to the original image
        pred = self._postprocess(outputs, cv2img.shape, ratio, (dw, dh), classes)

        return pred

    def _preprocess(self, img: np.ndarray) -> Tuple[np.ndarray, float, Tuple[int, int]]:
        """
        Generic letterbox preprocessing: adapts to any input size while
        preserving aspect ratio via padding.

        Args:
            img: Input image (H, W, 3), BGR channel order.

        Returns:
            processed_image: Normalized [1, 3, img_size, img_size] float32 array
            scale: Scale factor applied (original -> img_size)
            (dw, dh): Left/top padding in pixels (used to undo the
                      letterbox during post-processing)
        """
        # Original dimensions
        h, w = img.shape[:2]

        # Compute scale factor (preserve aspect ratio)
        scale = min(self.img_size / h, self.img_size / w)
        new_h, new_w = int(h * scale), int(w * scale)

        # Resize image
        resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)

        # Create padded canvas (114 matches the YOLO training convention)
        canvas = np.full((self.img_size, self.img_size, 3), 114, dtype=np.uint8)

        # Compute padding offsets (centered)
        dh, dw = (self.img_size - new_h) // 2, (self.img_size - new_w) // 2
        canvas[dh:dh + new_h, dw:dw + new_w] = resized

        # Convert to model input layout
        canvas = canvas[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
        canvas = np.ascontiguousarray(canvas, dtype=np.float32) / 255.0
        canvas = np.expand_dims(canvas, axis=0)  # [1, 3, img_size, img_size]

        return canvas, scale, (dw, dh)

    def _postprocess(self,
                    outputs: np.ndarray,
                    img_shape: Tuple[int, int],
                    ratio: Union[float, Tuple[float, float]],
                    pad: Tuple[float, float],
                    classes: Union[List[int], None] = None) -> Union[torch.Tensor, None]:
        """
        Apply NMS and map box coordinates from letterboxed model space back
        to the original image.

        Args:
            outputs: Raw model output [batch, num_detections, ...]
            img_shape: Original image shape (H, W[, C])
            ratio: Scale factor(s) used in preprocessing; a single float
                   or a (w_ratio, h_ratio) pair
            pad: (dw, dh) letterbox padding from preprocessing
            classes: Optional class-ID filter forwarded to NMS

        Returns:
            Tensor [n, 6] of [x1,y1,x2,y2,conf,cls] or None if no detections.
        """
        pred = torch.from_numpy(outputs)

        # Apply NMS (returns a list of per-image tensors)
        pred = non_max_suppression(
            pred,
            conf_thres=self.conf_thres,
            iou_thres=self.iou_thres,
            classes=classes,
            agnostic=False,
            max_det=1000
        )

        # Empty-tensor check: no detections survived NMS
        if pred[0].shape[0] == 0:
            return None

        # Normalize ratio format to per-axis factors
        if isinstance(ratio, tuple):
            w_ratio, h_ratio = ratio
        else:
            w_ratio = h_ratio = ratio

        det = pred[0]

        # Undo letterbox: remove padding, then rescale to original image
        det[:, [0, 2]] = (det[:, [0, 2]] - pad[0]) / w_ratio
        det[:, [1, 3]] = (det[:, [1, 3]] - pad[1]) / h_ratio

        # Clamp boxes to image bounds
        det[:, [0, 2]] = torch.clamp(det[:, [0, 2]], 0, img_shape[1])
        det[:, [1, 3]] = torch.clamp(det[:, [1, 3]], 0, img_shape[0])

        return det