import onnx
import onnxruntime as ort
import numpy as np

MODEL_PATH = 'rnt_18_2.onnx'


def main():
    """Validate the ONNX model at MODEL_PATH and run one inference on random input.

    Prints the raw list of output arrays returned by onnxruntime.
    """
    # Check the model's structural validity before attempting inference;
    # raises onnx.checker.ValidationError on a malformed graph.
    model = onnx.load(MODEL_PATH)
    onnx.checker.check_model(model)

    session = ort.InferenceSession(MODEL_PATH)

    # Derive the expected input shape from the model itself instead of
    # hard-coding (1, 3, 224, 224); dynamic dimensions (reported as a
    # string or None) fall back to 1, matching the original batch size.
    input_meta = session.get_inputs()[0]
    shape = [dim if isinstance(dim, int) else 1 for dim in input_meta.shape]

    # NOTE: the input dtype must be np.float32 — onnxruntime rejects
    # float64 (the default dtype of np.random.randn).
    x = np.random.randn(*shape).astype(np.float32)

    # Passing None as the output-name list returns all model outputs.
    outputs = session.run(None, {input_meta.name: x})
    print(outputs)


if __name__ == '__main__':
    main()