import numpy as np
from PyCmpltrtok.common import *
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit


def prepare(trt_path, output_shape, dtype, dummy_input_batch):
    """Deserialize a TensorRT engine and allocate device buffers for inference.

    Requires an active CUDA context (provided here by the module-level
    ``import pycuda.autoinit``).

    :param trt_path: Path to a serialized TensorRT engine file (.trt).
    :param output_shape: Shape of the model output, e.g. [batch_size, n_classes].
    :param dtype: Numpy dtype of both input and output buffers.
    :param dummy_input_batch: ndarray with the size and dtype of a real input
        batch; used only to size the device-side input buffer.
    :return: Tuple (context, d_input, d_output, bindings, stream,
        dummy_output_batch) to be passed on to :func:`run`.
    """
    runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
    with open(trt_path, 'rb') as f:
        engine = runtime.deserialize_cuda_engine(f.read())
    context = engine.create_execution_context()

    # Host-side template for the output plus raw device allocations sized
    # from the host arrays.
    dummy_output_batch = np.empty(output_shape, dtype=dtype)
    d_input = cuda.mem_alloc(dummy_input_batch.nbytes)
    d_output = cuda.mem_alloc(dummy_output_batch.nbytes)
    # TensorRT takes device pointers as plain ints, in engine binding order
    # (assumed here to be [input, output] — confirm against the engine).
    bindings = [int(d_input), int(d_output)]
    stream = cuda.Stream()
    return context, d_input, d_output, bindings, stream, dummy_output_batch


def run(context, d_input, d_output, bindings, stream, dummy_output_batch, input):
    """Execute one inference pass on a prepared TensorRT execution context.

    :param context: TensorRT execution context from :func:`prepare`.
    :param d_input: Device allocation for the input batch.
    :param d_output: Device allocation for the output batch.
    :param bindings: List of device pointers (ints) in binding order.
    :param stream: CUDA stream all transfers and execution are queued on.
    :param dummy_output_batch: Host ndarray used as the shape/dtype template
        for the returned output buffer.
    :param input: Host input batch to copy to the device. NOTE(review): this
        name shadows the ``input`` builtin; kept for interface compatibility.
    :return: Host ndarray containing the model predictions.
    """
    # empty_like already copies dtype from the template; no need to pass it.
    output = np.empty_like(dummy_output_batch)

    # Transfer input data to device
    cuda.memcpy_htod_async(d_input, input, stream)
    # Execute model asynchronously on the stream
    context.execute_async_v2(bindings, stream.handle, None)
    # Transfer predictions back to host
    cuda.memcpy_dtoh_async(output, d_output, stream)
    # Synchronize: block until all queued work on the stream has finished
    stream.synchronize()

    return output


if '__main__' == __name__:

    def _main():
        """Smoke test: load a serialized engine and run a few dummy batches."""
        BATCH_SIZE = 64
        N_CLASSES = 10
        PRECISION = np.float32
        trt_path = '/home/asuspei/PycharmProjects/AsusCondaP37Torch1101Cuda111/python_ai/category/onnx/torch2onnx/_save/vgg16_torch2onnx.py/v1.0/2022_06_17_12_54_40_476426-64.trt'
        # All-zero NHWC-shaped batch; assumes the engine expects this layout
        # — TODO confirm against the exported model.
        dummy_input_batch = np.zeros((BATCH_SIZE, 224, 224, 3), dtype=PRECISION)

        state = prepare(trt_path, [BATCH_SIZE, N_CLASSES], PRECISION, dummy_input_batch)
        context, d_input, d_output, bindings, stream, dummy_output_batch = state

        for round_idx in range(4):
            print(round_idx)
            predictions = run(
                context, d_input, d_output, bindings, stream,
                dummy_output_batch, dummy_input_batch,
            )
            check_np(predictions, 'predictions')

    _main()
