import onnxruntime
import numpy as np
import cv2
import time
from typing import List, Dict, Tuple


class ONNXModel:
    """Thin wrapper around an ONNX Runtime session for YOLO-style detection.

    Loads the model, preferring the CUDA execution provider when available,
    and exposes preprocess / run / postprocess as a single `predict` call.
    """

    def __init__(self, model_path: str, class_names: List[str] = None):
        # Request CUDA first.  Note: onnxruntime does NOT necessarily raise
        # when CUDA is unavailable — it can silently fall back to CPU — so
        # the active provider must be read back from the session instead of
        # being inferred from a (never-firing) exception.  The original bare
        # `except:` therefore mis-reported CPU machines as using CUDA.
        try:
            self.session = onnxruntime.InferenceSession(
                model_path,
                providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
            )
        except Exception:
            # Some onnxruntime builds raise when an unknown provider is
            # requested; retry with CPU only.
            self.session = onnxruntime.InferenceSession(
                model_path,
                providers=['CPUExecutionProvider']
            )

        # The first entry of get_providers() is the provider actually in use.
        self.provider = self.session.get_providers()[0]
        if self.provider == 'CUDAExecutionProvider':
            print("✅ Using CUDA acceleration")
        else:
            print("⚠️ CUDA not available, falling back to CPU")

        self.class_names = class_names or []
        self.input_name = self.session.get_inputs()[0].name
        self.input_shape = self.session.get_inputs()[0].shape
        self.output_names = [output.name for output in self.session.get_outputs()]

        print(f"✅ Model loaded from {model_path}")
        print(f"Input shape: {self.input_shape}")
        print(f"Output names: {self.output_names}")

    def preprocess(self, image: np.ndarray) -> np.ndarray:
        """Convert a BGR image to a normalized NCHW float32 tensor.

        Assumes the model input layout is NCHW, i.e. input_shape is
        (N, C, H, W) — TODO confirm against the exported model.
        """
        img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # (W, H) order for cv2.resize; target size taken from the model input.
        img_resized = cv2.resize(img_rgb, (self.input_shape[3], self.input_shape[2]))
        # HWC -> CHW, add batch dim, scale pixel values to [0, 1].
        return img_resized.transpose(2, 0, 1)[np.newaxis].astype(np.float32) / 255.0

    def postprocess(self, outputs: List[np.ndarray], conf_thresh: float = 0.5) -> List[Dict]:
        """Decode raw model output into a list of detection dicts.

        Each returned dict has keys: "bbox" (x1, y1, x2, y2), "confidence",
        "class_id", "class_name".

        NOTE(review): this assumes each row after the transpose is
        (x1, y1, x2, y2, conf, class_id), i.e. a model exported with decode
        and NMS baked in.  The official YOLOv8 export *without* NMS emits
        (cx, cy, w, h, per-class scores) instead — verify against the actual
        export.  Also, no NMS is applied here and boxes are left in
        network-input coordinates (not rescaled to the original image size).
        """
        # e.g. (1, 6, 8400) -> (6, 8400) -> (8400, 6): one row per candidate.
        detections = np.squeeze(outputs[0]).transpose(1, 0)

        results = []
        for det in detections:
            x1, y1, x2, y2, conf, cls_id = det
            if float(conf) < conf_thresh:
                continue

            cls_id = int(cls_id)
            # Drop detections whose class index has no configured name.
            if cls_id >= len(self.class_names):
                continue

            results.append({
                "bbox": [float(x1), float(y1), float(x2), float(y2)],
                "confidence": float(conf),
                "class_id": cls_id,
                "class_name": self.class_names[cls_id]
            })
        return results

    def predict(self, image: np.ndarray) -> Tuple[List[Dict], float]:
        """Run the full pipeline on a BGR image.

        Returns (detections, elapsed_milliseconds).
        """
        start = time.time()
        input_tensor = self.preprocess(image)
        outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
        results = self.postprocess(outputs)
        return results, (time.time() - start) * 1000


def visualize_and_show(image: np.ndarray, results: List[Dict]) -> bool:
    """Draw detections on a copy of *image* and display them in a window.

    Blocks until a key is pressed in the window, then tears the window
    down (avoids the stuck-window problem).  Returns True when the window
    was shown successfully, False when any step failed (e.g. a headless
    environment without GUI support).
    """
    try:
        canvas = image.copy()
        green = (0, 255, 0)
        for detection in results:
            left, top, right, bottom = (int(c) for c in detection["bbox"])
            cv2.rectangle(canvas, (left, top), (right, bottom), green, 2)
            caption = f"{detection['class_name']} {detection['confidence']:.2f}"
            cv2.putText(canvas, caption, (left, top - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, green, 2)

        # Resizable window so large images stay usable.
        cv2.namedWindow("Results", cv2.WINDOW_NORMAL)
        cv2.imshow("Results", canvas)

        print("\nPress any key on the image window to continue...")
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return True
    except Exception as e:
        # Best-effort display: report the failure and let the caller
        # fall back to saving the result to disk.
        print(f"⚠️ Visualization error: {e}")
        return False


if __name__ == "__main__":
    # Configuration
    MODEL_PATH = "outputs/models/model.onnx"
    # IMAGE_PATH = "data/1.jpeg"
    IMAGE_PATH = "data/raw_images/000000000001 (62).jpg"
    CLASS_NAMES = ["cat", "dog"]  # must match the class order used at training time

    try:
        # 1. Initialize the model
        print("Initializing model...")
        model = ONNXModel(MODEL_PATH, CLASS_NAMES)

        # 2. Load the image (cv2.imread returns None instead of raising)
        print("\nLoading image...")
        img = cv2.imread(IMAGE_PATH)
        if img is None:
            raise FileNotFoundError(f"Cannot load image at {IMAGE_PATH}")
        print(f"Image shape: {img.shape}")

        # 3. Run inference
        print("\nRunning inference...")
        results, infer_time = model.predict(img)
        print(f"Inference time: {infer_time:.2f}ms")

        # 4. Print results
        print("\nDetection results:")
        for i, det in enumerate(results, 1):
            print(f"{i}. {det['class_name']} ({det['confidence']:.2f}) at {det['bbox']}")

        # 5. Annotate once and save.  Bug fixes vs. the original: cv2.imwrite
        # expects BGR input, so the previous BGR2RGB conversion before saving
        # produced color-swapped files; and the success path used to save the
        # *unannotated* original image, losing the boxes.
        vis_img = img.copy()
        for det in results:
            x1, y1, x2, y2 = map(int, det["bbox"])
            cv2.rectangle(vis_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(vis_img, f"{det['class_name']} {det['confidence']:.2f}",
                        (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        output_path = "detection_result.jpg"
        cv2.imwrite(output_path, vis_img)

        # 6. Show interactively; in either case the annotated file is on disk.
        print("\nVisualizing results...")
        if visualize_and_show(img, results):
            print(f"\nResults saved to {output_path}")
        else:
            print(f"⚠️ Window display failed. Results saved to {output_path}")

    except Exception as e:
        # Top-level boundary of the script: report and exit cleanly.
        print(f"\n❌ Error: {str(e)}")