import onnxruntime as ort
import numpy as np
import time
import argparse

def benchmark_onnx_model(model_path, device, input_shape, iterations):
    """Benchmark inference latency of an ONNX model.

    Args:
        model_path: Path to the .onnx model file.
        device: 'cpu' or 'cuda'; 'cuda' requires CUDAExecutionProvider.
        input_shape: Shape of the random float32 input tensor, e.g. [1, 3, 224, 224].
        iterations: Number of timed inference runs (must be >= 1).

    Returns:
        Tuple (max_time, min_time, avg_time) of per-run latencies in milliseconds.

    Raises:
        ValueError: If the device/provider is unavailable or iterations < 1.
    """
    if iterations < 1:
        # Guard against ZeroDivisionError when computing the average.
        raise ValueError(f"iterations must be >= 1, got {iterations}")

    available_providers = ort.get_available_providers()
    print(f"Available providers: {available_providers}")

    # Select the execution provider matching the requested device.
    if device == 'cpu':
        providers = ['CPUExecutionProvider']
    elif device == 'cuda' and 'CUDAExecutionProvider' in available_providers:
        providers = ['CUDAExecutionProvider']
    else:
        raise ValueError(f"Unsupported device specified or provider not available. Use 'cpu' or 'cuda'. Available providers: {available_providers}")

    # Create an ONNX runtime session
    session = ort.InferenceSession(model_path, providers=providers)

    # Generate random input data for the model's first input.
    input_name = session.get_inputs()[0].name
    input_data = np.random.randn(*input_shape).astype(np.float32)

    # Warm up: amortize one-time costs (graph optimization, memory allocation,
    # CUDA kernel compilation) so they don't pollute the timed runs.
    print("Warming up...")
    for _ in range(10):
        _ = session.run(None, {input_name: input_data})

    # Timed runs. perf_counter() is a monotonic high-resolution clock,
    # appropriate for interval measurement (unlike wall-clock time.time()).
    times = []
    print("Benchmarking...")
    for _ in range(iterations):
        start_time = time.perf_counter()
        _ = session.run(None, {input_name: input_data})
        end_time = time.perf_counter()
        times.append((end_time - start_time) * 1000)  # ms

    max_time = max(times)
    min_time = min(times)
    avg_time = sum(times) / len(times)

    return max_time, min_time, avg_time

def _main():
    """Parse CLI arguments, run the benchmark, and print the results."""
    cli = argparse.ArgumentParser(description="Benchmark ONNX model inference time")
    cli.add_argument("-m", "--model_path", required=True, type=str, help="Path to the ONNX model")
    cli.add_argument("-s", "--input_shape", required=True, type=int, nargs='+', help="Input tensor shape")
    cli.add_argument("-d", "--device", type=str, default='cpu', choices=['cpu', 'cuda'], help="Device to run the inference on (cpu or cuda)")
    cli.add_argument("-i", "--iterations", default=1, type=int, help="Number of iterations to run the benchmark")
    args = cli.parse_args()
    print(f'args:{args}')

    # Run the benchmark and report latency statistics in milliseconds.
    max_time, min_time, avg_time = benchmark_onnx_model(
        args.model_path, args.device, args.input_shape, args.iterations
    )
    print(f"Max inference time: {max_time:.6f} ms")
    print(f"Min inference time: {min_time:.6f} ms")
    print(f"Avg inference time: {avg_time:.6f} ms")


if __name__ == "__main__":
    _main()


