# test_onnx.py

import cv2
import numpy as np
import onnxruntime as ort
import os

# -------------------------------
# Configuration
# -------------------------------
ONNX_MODEL_PATH = 'D:\\CodeCNN\\weights-test\\best30.onnx'
IMAGE_PATH = r'D:\testpic\detect_20250923_105455_1758596095529.jpg'  # path to the input test image
OUTPUT_PATH = r'D:\testpic\detect_20250923_105455_1758596095529_out.jpg'  # where the annotated result is written

# Class names (must match the training data and its label order!)
CLASSES = ['hel', 'nohel','wheel']  # 3 classes, in training-time order
COLORS = [(0, 255, 0), (255, 0, 0), (0, 0, 255)]  # BGR: green, blue, red — one per class

# Network input size (must match the size used when exporting the model)
IMG_SIZE = 640

# Confidence threshold (detections scoring below this are discarded)
CONF_THRESHOLD = 0.5

# -------------------------------
# Load the ONNX model
# -------------------------------
print("Loading ONNX model...")
ort_session = ort.InferenceSession(ONNX_MODEL_PATH)

# Fetch the input/output tensor names needed by InferenceSession.run
input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name
print(f"Input name: {input_name}, Output name: {output_name}")


# -------------------------------
# 图像预处理
# -------------------------------
def preprocess_image(image_path, img_size=640):
    """Load an image from disk and letterbox it into a square model input.

    The image is scaled so its longer side equals ``img_size``, then centered
    on a gray (114) canvas. Returns everything needed to map detections back:
    (original BGR image, float32 BCHW tensor, resize scale, left pad, top pad).

    Raises FileNotFoundError if the image cannot be read.
    """
    original = cv2.imread(image_path)
    if original is None:
        raise FileNotFoundError(f"无法加载图像: {image_path}")

    height, width = original.shape[:2]

    # Scale factor that makes the longer side exactly fit the target square.
    ratio = img_size / max(height, width)
    scaled_w = int(width * ratio)
    scaled_h = int(height * ratio)
    shrunk = cv2.resize(original, (scaled_w, scaled_h))

    # YOLO-style letterbox: center the resized image on a gray canvas
    # (OpenCV images are BGR; 114 is the conventional letterbox fill value).
    canvas = np.full((img_size, img_size, 3), 114, dtype=np.uint8)
    top = (img_size - scaled_h) // 2
    left = (img_size - scaled_w) // 2
    canvas[top:top + scaled_h, left:left + scaled_w] = shrunk

    # HWC uint8 -> CHW -> BCHW float32, the layout the ONNX session expects.
    tensor = np.expand_dims(canvas.transpose(2, 0, 1), 0).astype(np.float32)

    return original, tensor, ratio, left, top


# -------------------------------
# 后处理并可视化
# -------------------------------
def postprocess_and_visualize(original_img, output, scale, pad_w, pad_h, conf_threshold=0.25):
    """Filter raw detections, map them back to original-image coordinates,
    and draw boxes + labels onto ``original_img`` (modified in place).

    Parameters
    ----------
    original_img : np.ndarray
        Original BGR image that was letterboxed for inference.
    output : np.ndarray
        Model output of shape (1, N, 6): [x1, y1, x2, y2, conf, class_id],
        in letterboxed-input (640x640) coordinates.
    scale, pad_w, pad_h
        Letterbox parameters returned by ``preprocess_image``.
    conf_threshold : float
        Minimum confidence for a detection to be drawn.

    Returns
    -------
    np.ndarray
        ``original_img`` with the surviving detections drawn on it.
    """
    detections = output[0]  # (N, 6)
    valid_dets = detections[detections[:, 4] > conf_threshold]

    img_h, img_w = original_img.shape[:2]  # hoisted out of the loop

    for det in valid_dets:
        x1, y1, x2, y2, conf, cls_id = det
        cls_id = int(cls_id)

        # Undo the letterbox: remove the padding, then rescale to original size.
        x1 = (x1 - pad_w) / scale
        y1 = (y1 - pad_h) / scale
        x2 = (x2 - pad_w) / scale
        y2 = (y2 - pad_h) / scale

        # Clamp to the image bounds.
        x1 = int(np.clip(x1, 0, img_w))
        y1 = int(np.clip(y1, 0, img_h))
        x2 = int(np.clip(x2, 0, img_w))
        y2 = int(np.clip(y2, 0, img_h))

        color = COLORS[cls_id % len(COLORS)]
        cv2.rectangle(original_img, (x1, y1), (x2, y2), color, 2)

        # Guard class-id lookup: the original CLASSES[cls_id] raised IndexError
        # for ids outside the list, while COLORS already used modulo.
        name = CLASSES[cls_id] if 0 <= cls_id < len(CLASSES) else str(cls_id)
        label = f"{name}: {conf:.2f}"
        font_scale = 0.7
        thickness = 2
        (w_text, h_text), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, font_scale, thickness)

        # Keep the label background and text inside the image when the box
        # touches the top edge (previously drawn at a negative y, off-image).
        bg_top = max(y1 - h_text - 10, 0)
        text_y = max(y1 - 5, h_text)
        cv2.rectangle(original_img, (x1, bg_top), (x1 + w_text, y1), color, -1)
        cv2.putText(original_img, label, (x1, text_y), cv2.FONT_HERSHEY_SIMPLEX,
                    font_scale, (255, 255, 255), thickness, cv2.LINE_AA)

    return original_img


# -------------------------------
# Main inference pipeline
# -------------------------------
if __name__ == "__main__":
    try:
        print("Preprocessing image...")
        original_img, input_tensor, scale, pad_w, pad_h = preprocess_image(IMAGE_PATH, IMG_SIZE)

        print("Running inference...")
        # End-to-end model output: shape (1, 300, 6) = [x1, y1, x2, y2, conf, class_id]
        outputs = ort_session.run([output_name], {input_name: input_tensor})[0]

        print(f"Inference done. Detected {len(outputs[0])} boxes before filtering.")

        result_img = postprocess_and_visualize(
            original_img, outputs, scale, pad_w, pad_h, conf_threshold=CONF_THRESHOLD
        )

        # Persist the annotated result.
        cv2.imwrite(OUTPUT_PATH, result_img)
        print(f"结果已保存至: {OUTPUT_PATH}")

        # Optionally display the result in a window.
        cv2.imshow("ONNX Inference Result", result_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    except Exception as e:
        # Print the full traceback, not just the message — the bare message
        # made failures (bad path, shape mismatch) hard to diagnose.
        import traceback
        traceback.print_exc()
        print(f"[ERROR] {e}")