import onnxruntime as ort
import cv2
import numpy as np
import matplotlib.pyplot as plt

def preprocess_image(image_path, target_size=(768, 1024)):
    """Load an image and letterbox it to the network input size.

    Args:
        image_path: path to the image file on disk.
        target_size: (width, height) of the network input plane.

    Returns:
        Tuple of (orig_img, input_tensor, (scale, (pad_left, pad_top))):
        orig_img is the untouched BGR image for visualization;
        input_tensor is float32 NCHW, RGB, values in [0, 1], shape
        (1, 3, target_size[1], target_size[0]).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    img = cv2.imread(image_path)
    # cv2.imread signals failure by returning None rather than raising.
    if img is None:
        raise FileNotFoundError(f"Cannot read image: {image_path}")
    orig_img = img.copy()  # keep an unmodified copy for visualization
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Uniform scale so the image fits inside target_size, aspect ratio kept.
    h, w = img.shape[:2]
    scale = min(target_size[1] / h, target_size[0] / w)
    new_h, new_w = int(h * scale), int(w * scale)
    img = cv2.resize(img, (new_w, new_h))

    # Center the resized image and pad the borders with the conventional
    # YOLO letterbox gray (114, 114, 114).
    top = (target_size[1] - new_h) // 2
    bottom = target_size[1] - new_h - top
    left = (target_size[0] - new_w) // 2
    right = target_size[0] - new_w - left
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=(114, 114, 114))

    # HWC uint8 -> NCHW float32 in [0, 1] with a leading batch axis.
    img = img.astype(np.float32) / 255.0
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return orig_img, img, (scale, (left, top))

def postprocess(outputs, conf_thresh=0.5):
    """Decode raw model output into detection boxes.

    Assumes output layout [batch, num_det, 5 + num_classes] where each row
    is (cx, cy, w, h, confidence, class_score_0, ...).

    Args:
        outputs: list of model outputs; only outputs[0][0] (batch 0) is used.
        conf_thresh: detections with confidence below this are dropped
            (confidence exactly equal to the threshold is kept).

    Returns:
        (boxes, scores, class_ids): boxes as [x1, y1, x2, y2] corner lists,
        scores as floats, class_ids as ints. All empty if nothing passes.
    """
    dets = np.asarray(outputs[0][0])
    if dets.size == 0:
        return [], [], []

    # Vectorized filter instead of a per-row Python loop: ORT already
    # hands us a NumPy array with potentially thousands of candidates.
    kept = dets[dets[:, 4] >= conf_thresh]
    if kept.shape[0] == 0:
        return [], [], []

    # Highest class score decides the label.
    class_ids = np.argmax(kept[:, 5:], axis=1).tolist()

    # (cx, cy, w, h) -> (x1, y1, x2, y2) corner format.
    cx, cy, width, height = kept[:, 0], kept[:, 1], kept[:, 2], kept[:, 3]
    x1 = cx - width / 2
    y1 = cy - height / 2
    x2 = cx + width / 2
    y2 = cy + height / 2

    boxes = np.stack([x1, y1, x2, y2], axis=1).tolist()
    scores = kept[:, 4].astype(float).tolist()
    return boxes, scores, class_ids

def visualize(orig_img, boxes, scores, class_ids, scale, padding):
    """Draw detection boxes on the original image and display it.

    Args:
        orig_img: original BGR image; drawing calls modify it in place.
        boxes: list of [x1, y1, x2, y2] boxes in letterboxed network-input
            coordinates (before undoing padding/scale).
        scores: confidence value per box.
        class_ids: integer class index per box (raw id is used as the label).
        scale: the single resize factor from preprocessing; the same value
            is applied to both axes.
        padding: (pad_left, pad_top) offsets added during letterboxing.
    """
    # Map box coordinates back to the original image size.
    h, w = orig_img.shape[:2]
    scale_h, scale_w = scale, scale
    pad_left, pad_top = padding

    for box, score, class_id in zip(boxes, scores, class_ids):
        # Undo the letterbox padding first, then undo the resize.
        x1 = int((box[0] - pad_left) / scale_w)
        y1 = int((box[1] - pad_top) / scale_h)
        x2 = int((box[2] - pad_left) / scale_w)
        y2 = int((box[3] - pad_top) / scale_h)

        # Clamp to the image bounds.
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)

        # Draw the rectangle and a "class_id: score" label above it.
        cv2.rectangle(orig_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        label = f"{class_id}: {score:.2f}"
        cv2.putText(orig_img, label, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Show the result; convert BGR -> RGB for matplotlib.
    plt.figure(figsize=(12, 8))
    plt.imshow(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()

def main():
    """Run ONNX inference on a single image and visualize the detections."""
    image_path = "D:/ideaSpace/rag-in-action-master/wukong.jpg"
    model_path = r"D:\ideaSpace\MyPython\models\yolo_x_layout\yolox_l0.05.onnx"

    orig_img, input_tensor, (scale, padding) = preprocess_image(image_path)

    # Feed the NCHW tensor to the model's single input.
    sess = ort.InferenceSession(model_path)
    outputs = sess.run(None, {sess.get_inputs()[0].name: input_tensor})

    boxes, scores, class_ids = postprocess(outputs)
    print(f"检测到 {len(boxes)} 个目标：")
    for box, score, class_id in zip(boxes, scores, class_ids):
        print(f"- 类别 {class_id}, 置信度 {score:.2f}, 位置 {box}")

    visualize(orig_img, boxes, scores, class_ids, scale, padding)


# Guard so importing this module does not trigger inference.
if __name__ == "__main__":
    main()