import argparse
import subprocess
import gc
from typing import List, Tuple, Dict
import cv2
import numpy as np
import onnxruntime as ort

# Class-id -> label mapping used for drawn labels and the detection dicts.
class_mapping = {
    0: "hero",
    1: "monster",
    2: "goods",
    3: "boos",  # NOTE(review): possibly a typo for "boss" — kept as-is since downstream code may match this exact string
    4: "door",
    5: "brand",
    6: "again"
}


def is_cuda_available() -> bool:
    """Return True when `nvidia-smi` runs successfully, i.e. a CUDA GPU is usable."""
    try:
        completed = subprocess.run(
            ['nvidia-smi'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            check=False,  # judge success via the return code instead of raising
        )
    except (FileNotFoundError, PermissionError):
        # Tool missing or not executable -> treat as "no CUDA".
        return False
    return completed.returncode == 0


def check_requirements(package: str) -> None:
    """Verify that *package* is importable, raising ImportError with an install hint otherwise.

    The module name is taken as the part before the first '-', so e.g.
    "onnxruntime-gpu" is checked by importing ``onnxruntime``.

    Raises:
        ImportError: if the module cannot be imported; the original import
            failure is chained as the cause for easier debugging.
    """
    module_name = package.split('-')[0]
    try:
        __import__(module_name)
    except ImportError as exc:
        # Chain the underlying failure instead of discarding it.
        raise ImportError(f"请安装依赖: pip install {package}") from exc


class YOLOv8:
    """Single-image YOLOv8 detector backed by ONNX Runtime.

    Construct with model/image paths and thresholds, then call :meth:`main`
    to run letterboxed preprocessing, inference and NMS post-processing.
    ``main`` returns the annotated BGR image plus a structured detection list.
    """

    def __init__(self, onnx_model: str, input_image: str, confidence_thres: float, iou_thres: float):
        """Store configuration only; the ONNX session is created lazily in main().

        Args:
            onnx_model: Path to the ONNX model file.
            input_image: Path to the image to run detection on.
            confidence_thres: Minimum class score for a candidate box.
            iou_thres: IoU threshold used by non-maximum suppression.
        """
        self.onnx_model = onnx_model
        self.input_image = input_image
        self.confidence_thres = confidence_thres
        self.iou_thres = iou_thres
        self.classes = class_mapping
        self.color_palette = None  # created on demand, only needed when drawing
        self.detections = []
        # Model input size; filled from session metadata in main().
        self.input_width = 0
        self.input_height = 0
        # Original image size; filled by preprocess().
        self.img_height = 0
        self.img_width = 0
        # Inference session, created once on first use and then reused.
        self.session = None

    def _init_color_palette(self) -> None:
        """Lazily create one random color per class; called only when drawing."""
        if self.color_palette is None:
            self.color_palette = np.random.uniform(0, 255, size=(len(self.classes), 3))

    def letterbox(self, img: np.ndarray, new_shape: Tuple[int, int] = (640, 640)) -> Tuple[np.ndarray, Tuple[int, int]]:
        """Resize *img* to fit ``new_shape`` (height, width) preserving aspect ratio.

        The image is scaled uniformly and padded with gray (114) so the result
        is exactly ``new_shape``.

        Returns:
            The padded image and the ``(top, left)`` padding offsets, needed
            later to map detections back onto the original image.
        """
        shape = img.shape[:2]  # (height, width)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        new_unpad = (int(round(shape[1] * r)), int(round(shape[0] * r)))  # (width, height)
        dw, dh = (new_shape[1] - new_unpad[0]) / 2, (new_shape[0] - new_unpad[1]) / 2

        # INTER_AREA gives the best quality/speed trade-off when shrinking.
        if shape[::-1] != new_unpad:
            img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_AREA)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114))
        return img, (top, left)

    def draw_detections(self, img: np.ndarray, box: List[float], score: float, class_id: int) -> None:
        """Draw one bounding box with a filled label background onto *img* in place.

        Args:
            img: BGR image to draw on (modified in place).
            box: ``[x, y, width, height]`` in original-image coordinates.
            score: Confidence score shown in the label.
            class_id: Index into ``self.classes`` / the color palette.
        """
        self._init_color_palette()  # palette is only needed here
        x1, y1, w, h = box
        color = self.color_palette[class_id]
        cv2.rectangle(img, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)

        label = f"{self.classes[class_id]}: {score:.2f}"
        (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        # Put the label above the box unless it would run off the top of the image.
        label_x, label_y = x1, y1 - 10 if y1 > label_height else y1 + label_height

        cv2.rectangle(
            img, (label_x, label_y - label_height), (label_x + label_width, label_y), color, cv2.FILLED
        )
        cv2.putText(img, label, (label_x, label_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

    def preprocess(self) -> Tuple[np.ndarray, Tuple[int, int]]:
        """Load the image and turn it into a normalized NCHW float32 tensor.

        Returns:
            The ``(1, 3, H, W)`` input tensor and the ``(top, left)``
            letterbox padding offsets.

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        img = cv2.imread(self.input_image)
        if img is None:
            raise FileNotFoundError(f"无法加载图像: {self.input_image}")

        self.img_height, self.img_width = img.shape[:2]
        # The model expects RGB; OpenCV loads BGR.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB, dst=None)
        # letterbox() takes (height, width) — keep the order explicit.
        img_resized, pad = self.letterbox(img_rgb, (self.input_height, self.input_width))

        # Normalize to [0, 1] using an in-place divide to avoid an extra copy.
        img_data = img_resized.astype(np.float32)
        img_data /= 255.0

        # HWC -> CHW, add the batch dimension, and make memory contiguous
        # (contiguous input speeds up ONNX Runtime inference).
        img_data = np.transpose(img_data, (2, 0, 1))
        img_data = np.expand_dims(img_data, axis=0)
        return np.ascontiguousarray(img_data), pad

    def postprocess(self, input_image: np.ndarray, output: List[np.ndarray], pad: Tuple[int, int]) -> np.ndarray:
        """Filter raw model output, apply NMS, and draw the surviving boxes.

        Side effect: replaces ``self.detections`` with the kept detections.

        Args:
            input_image: Original BGR image; boxes are drawn onto it in place.
            output: Raw session outputs; ``output[0]`` is the detection tensor.
            pad: ``(top, left)`` letterbox offsets from :meth:`preprocess`.

        Returns:
            The annotated image (same object as *input_image*).
        """
        # (1, 4 + num_classes, N) -> (N, 4 + num_classes)
        outputs = np.transpose(np.squeeze(output[0]))

        boxes = []
        scores = []
        class_ids = []
        self.detections = []

        # Letterbox scale factor (computed once).
        gain = min(self.input_height / self.img_height, self.input_width / self.img_width)
        # Undo the letterbox padding for all candidates at once.
        outputs[:, 0] -= pad[1]  # left padding -> x
        outputs[:, 1] -= pad[0]  # top padding  -> y

        # Vectorized confidence filter instead of testing every row in Python.
        class_scores = outputs[:, 4:]
        best_scores = class_scores.max(axis=1)
        for i in np.flatnonzero(best_scores >= self.confidence_thres):
            max_score = best_scores[i]
            class_id = int(np.argmax(class_scores[i]))
            x, y, w, h = outputs[i][:4]

            # Center-size (model space) -> top-left-size (original-image space).
            left = int((x - w / 2) / gain)
            top = int((y - h / 2) / gain)
            width = int(w / gain)
            height = int(h / gain)
            center_x = left + width / 2
            center_y = top + height / 2
            bottom_y = top + height

            class_ids.append(class_id)
            scores.append(max_score)
            boxes.append([left, top, width, height])
            self.detections.append({
                "class_id": class_id,
                "class_name": self.classes[class_id],
                "confidence": float(max_score),
                "box": [left, top, width, height],
                "center": [center_x, center_y],
                "bottom": bottom_y
            })

        if boxes:
            indices = cv2.dnn.NMSBoxes(boxes, scores, self.confidence_thres, self.iou_thres)
            # OpenCV may return an (N, 1) array or an empty tuple depending on
            # the version/result; normalize to a flat integer array.
            indices = np.asarray(indices).reshape(-1).astype(int)

            # Keep only the NMS survivors and draw them.
            self.detections = [self.detections[i] for i in indices]
            for i in indices:
                self.draw_detections(input_image, boxes[i], scores[i], class_ids[i])

        return input_image

    def get_detections(self) -> List[Dict]:
        """Return the detection dicts produced by the last postprocess() call."""
        return self.detections

    def main(self) -> Tuple[np.ndarray, List[Dict]]:
        """Run the full pipeline: (lazy) session setup, preprocess, infer, postprocess.

        Returns:
            Tuple of the annotated BGR image and the list of detection dicts.
        """
        # ONNX Runtime tuning: cap intra-op threads and enable all graph optimizations.
        sess_options = ort.SessionOptions()
        sess_options.intra_op_num_threads = 4  # tune to the host CPU core count
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        # Create the inference session only once; reuse it on later calls.
        if self.session is None:
            providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if is_cuda_available() else [
                "CPUExecutionProvider"]
            print(f"预测推理{providers}")
            self.session = ort.InferenceSession(self.onnx_model, sess_options, providers=providers)

        # ONNX input layout is NCHW: [batch, channels, height, width].
        # BUGFIX: the previous code assigned width=shape[2] and height=shape[3],
        # silently swapping the two for non-square models. Symbolic (dynamic)
        # dims are reported as strings; fall back to 640 in that case.
        input_meta = self.session.get_inputs()[0]
        shape = input_meta.shape
        self.input_height = shape[2] if isinstance(shape[2], int) else 640
        self.input_width = shape[3] if isinstance(shape[3], int) else 640

        # Preprocess -> inference.
        img_data, pad = self.preprocess()
        outputs = self.session.run(None, {input_meta.name: img_data})

        # Re-read the original image for drawing (preprocess worked on an RGB copy).
        input_image = cv2.imread(self.input_image)

        output_image = self.postprocess(input_image, outputs, pad)

        # Free the large intermediate buffers promptly.
        del img_data, outputs
        gc.collect()

        return output_image, self.get_detections()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default="../../game/model/best.onnx", help="ONNX模型路径")
    parser.add_argument("--img", type=str, default="2.png", help="输入图像路径")
    parser.add_argument("--conf-thres", type=float, default=0.90, help="置信度阈值")
    parser.add_argument("--iou-thres", type=float, default=0.5, help="NMS IoU阈值")
    args = parser.parse_args()

    check_requirements("onnxruntime-gpu" if is_cuda_available() else "onnxruntime")

    # 创建检测器实例（可复用）
    detector = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)

    output_img, detections = detector.main()

    # 打印结果
    data = [
        (
            int(det['class_id']),  # 类别ID（确保为整数）
            [
                det['center'][0],  # 中心点x坐标
                det['bottom']  # 底部y坐标（对应原始结构中的box.xyxy[0][3]）
            ]
        )
        for det in detections  # 遍历所有检测结果
    ]
    print(f"我自己的格式{data}")

    # 显示结果
    cv2.imshow("检测结果", output_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # 手动释放资源（对于批量处理场景很重要）
    del detector, output_img, detections
    gc.collect()
