# -*- coding: utf-8 -*-
# @Time    : 2024/12/19 下午2:29
# @Author  : ysj
# @FileName: demo_an.py
# @Software: PyCharm
# @Blog    ：https://blog.csdn.net/ydscc?type=blog
import onnxruntime as ort
import cv2
import numpy as np

# Create an ONNX Runtime inference session for the exported YOLOv5 model.
onnx_path = 'yolov5m.onnx'
session = ort.InferenceSession(onnx_path)

# Tensor names of the first input/output, required by session.run().
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

# Load the test image. OpenCV reads it as BGR, HxWxC, uint8.
img_path = 'data/images/zidane.jpg'
img = cv2.imread(img_path)

# Preprocess for the network: BGR -> RGB, resize to the fixed 640x640
# input of the export, scale pixels to [0, 1] as float32, reorder to
# (C, H, W), and prepend a batch dimension -> (1, 3, 640, 640).
resized_rgb = cv2.resize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), (640, 640))
img_input = (resized_rgb.astype(np.float32) / 255.0).transpose(2, 0, 1)[np.newaxis, ...]

# Run inference; the model has a single output tensor.
outputs = session.run([output_name], {input_name: img_input})

# A standard YOLOv5 ONNX export returns shape (batch, num_boxes, 5 + num_classes):
# [x_center, y_center, width, height, objectness, class_score_0, ...],
# with coordinates in PIXELS of the 640x640 network input (NOT normalized).
# NOTE(review): verify against the actual export — older/custom exports may
# already fold in decoding or NMS.
pred = outputs[0][0]  # drop the batch dimension -> (num_boxes, 5 + num_classes)

conf_threshold = 0.25  # minimum overall confidence to keep a candidate
iou_threshold = 0.45   # NMS IoU threshold

# Overall confidence = objectness * best per-class score.
objectness = pred[:, 4]
class_scores = pred[:, 5:]
class_ids = np.argmax(class_scores, axis=1)
confidences = objectness * class_scores[np.arange(len(class_ids)), class_ids]

# Discard low-confidence candidates before the (O(n^2)) NMS step.
keep = confidences > conf_threshold
boxes_xywh = pred[keep, :4]
confidences = confidences[keep]
class_ids = class_ids[keep]

# Map boxes from the 640x640 network input back to the original image size.
img_height, img_width, _ = img.shape
scale_x = img_width / 640.0
scale_y = img_height / 640.0

# Convert (center x, center y, w, h) -> (x, y, w, h) in original-image pixels,
# the format cv2.dnn.NMSBoxes expects.
boxes = []
for x_center, y_center, w, h in boxes_xywh:
    x1 = int((x_center - w / 2) * scale_x)
    y1 = int((y_center - h / 2) * scale_y)
    bw = int(w * scale_x)
    bh = int(h * scale_y)
    boxes.append([x1, y1, bw, bh])

# Non-maximum suppression removes duplicate detections of the same object.
indices = cv2.dnn.NMSBoxes(boxes, confidences.tolist(), conf_threshold, iou_threshold)

# NMSBoxes returns () when nothing survives, and historically either a flat
# array or an (N, 1) array of indices — flatten() handles both.
for i in np.asarray(indices).flatten():
    x, y, w, h = boxes[i]

    # Draw the bounding box.
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # Draw "class_id confidence" above the box (clamped to stay on-image).
    label = f"{int(class_ids[i])} {confidences[i]:.2f}"
    cv2.putText(img, label, (x, max(y - 10, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

# Show the annotated image until a key is pressed.
cv2.imshow('Result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
