import sys
import onnx
import onnxruntime
import cv2
import numpy as np

# Load the ONNX model from the path given on the command line and
# print its producer metadata and opset version.
model = onnx.load(sys.argv[1])
print(f"=========模型 {sys.argv[1]} 信息===========")
print("Product name:", model.producer_name)
print("Product version:", model.producer_version)
print("Opset version:", model.opset_import[0].version)

#构建推理器
# Build the inference session and report every input/output tensor of the model.
ort_session = onnxruntime.InferenceSession(sys.argv[1])
input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name
print("输入名称:", input_name)
print("输出名称:", output_name)

# Loop variables renamed from `input`/`output`, which shadow Python builtins.
for inp in ort_session.get_inputs():
    print(f"输入名称: {inp.name}")
    print(f"输入形状: {inp.shape}")   # e.g. [1, 3, H, W]; dims may be symbolic/None
    print(f"输入数据类型: {inp.type}")  # e.g. tensor(float)

for out in ort_session.get_outputs():
    print(f"输出名称: {out.name}")
    print(f"输出形状: {out.shape}")
    print(f"数据类型: {out.type}")

#准备输入数据
image = cv2.imread(sys.argv[2])
image = cv2.resize(image,(480,640))
image = image.astype(np.float32) / 255.0
image = np.transpose(image,(2,0,1))
image = np.expand_dims(image,axis=0)
print(image.shape)

#运行推理
outputs = ort_session.run(None,{input_name:image})
print(outputs[0].shape)
print(outputs[1].shape)
print(outputs[2].shape)

confs = outputs[1].squeeze(0)
boxs = outputs[0].squeeze(0)
landmarks = outputs[2].squeeze(0)
for box,conf,landmark in zip(boxs,confs,landmarks):
    print(conf[1])
    if conf[1] > 0.4:
        print(box)
        print(conf)
        print(landmark)