#!/usr/bin/python3

import time
import argparse

import numpy as np
import onnxruntime as ort
import torch


def prepare_input_data(input_name, input_shape, batchsize=None):
    """Build a zero-filled float32 input feed for an ONNX session.

    Args:
        input_name: name of the model's input tensor.
        input_shape: shape reported by onnxruntime; the first (batch)
            dimension may be a symbolic string for dynamic-batch models,
            all other dimensions must be concrete ints.
        batchsize: desired batch size, or None to keep the model's static
            batch (or default to 1 for dynamic-batch models).

    Returns:
        dict mapping ``input_name`` to a numpy float32 array of zeros.

    Raises:
        ValueError: if a non-batch dimension is dynamic, if ``batchsize``
            conflicts with a static batch dimension, or if ``batchsize``
            is not positive.
    """
    if not all(isinstance(dim, int) for dim in input_shape[1:]):
        raise ValueError('only the batch dimension may be dynamic.')

    # Work on a copy so the caller's shape list is never mutated.
    shape = list(input_shape)
    if isinstance(shape[0], int):
        # Static batch dimension: a conflicting request cannot be honored.
        if batchsize is not None and batchsize != shape[0]:
            raise ValueError('the onnx can not support dynamic batchsize.')
    elif batchsize is not None:
        # Explicit check (not `if batchsize:`) so batchsize=0 is rejected
        # instead of silently falling through to the default of 1.
        if batchsize <= 0:
            raise ValueError('batchsize must greater than zero')
        shape[0] = batchsize
    else:
        shape[0] = 1  # default batch for dynamic-batch models

    data = np.zeros(shape, dtype=np.float32)
    return {input_name: data}


def onnx_infer(onnx_path, batchsize=None, loop=100):
    """Benchmark a single-input ONNX model and print a performance summary.

    Runs the model ``loop`` times on a zero-filled input, then reports
    total time, average latency (excluding the first, warm-up run) and
    throughput in frames per second.

    Args:
        onnx_path: path to the .onnx model file.
        batchsize: batch size to benchmark with, or None to use the
            model's static batch (or 1 for dynamic-batch models).
        loop: number of inference runs; must be > 1 so that at least one
            measured run remains after discarding the warm-up run.

    Raises:
        ValueError: if ``loop`` <= 1 or the model has more than one input.
    """
    # Validate before the (expensive) session construction; raise instead
    # of assert so the check survives `python -O`.
    if loop <= 1:
        raise ValueError('loop must greater than 1.')

    # NOTE(review): only TensorRT is enabled; onnxruntime may fall back to
    # other providers with a warning if it is unavailable. Uncomment the
    # entries below to allow fallbacks explicitly.
    providers = [
        'TensorrtExecutionProvider',
        #'CUDAExecutionProvider',
        #'CPUExecutionProvider'
    ]
    ort_session = ort.InferenceSession(onnx_path, providers=providers)

    inputs = ort_session.get_inputs()
    if len(inputs) != 1:
        raise ValueError('this script only support single input model.')
    input_feed = prepare_input_data(
                        inputs[0].name, inputs[0].shape, batchsize)
    # Resolve the effective batch size (covers dynamic-batch models where
    # batchsize was None and a default was chosen).
    batchsize = input_feed[inputs[0].name].shape[0]
    output_names = [o.name for o in ort_session.get_outputs()]

    duration_list = []
    for i in range(loop):
        # perf_counter is monotonic and high-resolution; time.time can
        # jump backwards under clock adjustments and skew measurements.
        start_time = time.perf_counter()
        ort_session.run(output_names, input_feed)
        end_time = time.perf_counter()
        duration = (end_time - start_time) * 1000  # seconds -> ms
        duration_list.append(duration)
        print(f'{i+1:0>3} duration: {duration:.3f} ms.')

    time_spent = np.sum(duration_list)
    # The first run typically includes provider warm-up (e.g. TensorRT
    # engine build), so exclude it from the average and throughput.
    avg_time_without_first = np.sum(duration_list[1:]) / (loop - 1)
    throughput = 1000 * batchsize / avg_time_without_first

    print(f'{"-"*22}Performance Summary{"-"*23}')
    print(f'Test device: {ort.get_device()}')
    print(f'Providers: {ort_session.get_providers()}')
    print(f'BatchSize: {batchsize}')
    print(f'Total time: {time_spent:.3f} ms.')
    print(f'Average time without first time: {avg_time_without_first:.3f} ms.')
    print(f'Throughput: {throughput:.3f} fps.')
    print(f'{"-"*64}')


if __name__ == "__main__":

    parser = argparse.ArgumentParser('test performance of onnx model.')
    # required=True: without it a missing --onnx-path passes None into
    # ort.InferenceSession and fails with an obscure runtime error
    # instead of a clean argparse usage message.
    parser.add_argument('--onnx-path', type=str, required=True,
                        help='path to onnx model.')
    parser.add_argument('--batchsize', default=None, type=int,
                        help='batchsize of input data.')
    parser.add_argument('--loop', default=100, type=int, help='test times.')
    args = parser.parse_args()

    onnx_infer(args.onnx_path, args.batchsize, args.loop)

