import onnxruntime as ort
import numpy as np
def verify_onnx_model(onnx_model_path):
    """Load an ONNX model, print its I/O metadata, and run a smoke-test inference.

    Args:
        onnx_model_path: Filesystem path to the ``.onnx`` model file.

    Returns:
        The list of output arrays produced by running the model on a
        random dummy input (as returned by ``InferenceSession.run``).
    """
    # Load the ONNX model.
    onnx_session = ort.InferenceSession(onnx_model_path)

    # Display the first input's metadata.
    model_input = onnx_session.get_inputs()[0]
    input_name = model_input.name
    input_shape = model_input.shape
    input_type = model_input.type
    print(f"Input Name: {input_name}, Shape: {input_shape}, Type: {input_type}")

    # Display the first output's metadata.
    model_output = onnx_session.get_outputs()[0]
    output_name = model_output.name
    output_shape = model_output.shape
    output_type = model_output.type
    print(f"Output Name: {output_name}, Shape: {output_shape}, Type: {output_type}")

    # Build the dummy input from the model's declared shape instead of
    # hard-coding (1, 128, 128, 3). Dynamic dimensions appear as None or
    # symbolic strings (e.g. "batch_size") in ONNX metadata; use 1 for those.
    concrete_shape = [dim if isinstance(dim, int) else 1 for dim in input_shape]
    # NOTE(review): assumes the model expects float32 input — confirm against
    # input_type if a different element type is reported.
    dummy_input = np.random.rand(*concrete_shape).astype(np.float32)

    # Perform a single inference as a smoke test.
    result = onnx_session.run([output_name], {input_name: dummy_input})
    print(f"Inference Result: {result}")
    return result
# Path to the ONNX model to smoke-test.
onnx_model_path = './model.onnx'

# Guard the entry point so importing this module does not trigger inference.
if __name__ == "__main__":
    verify_onnx_model(onnx_model_path)