import numpy as np
from PyCmpltrtok.common import *
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit


class MyTrtHelper(object):
    """Thin wrapper around a serialized TensorRT engine for repeated inference.

    Deserializes the engine once, allocates one device buffer for the input
    and one for the output, and reuses them (plus a single CUDA stream) across
    ``predict()`` calls.  Relies on ``import pycuda.autoinit`` (done at module
    import time in this file) to have created the CUDA context.
    """

    def __init__(self, trt_path, dtype, input_shape, output_shape, is_dynamic_shape=False, output_dtype=None):
        """Load the engine at ``trt_path`` and pre-allocate device buffers.

        :param trt_path: path to a serialized TensorRT engine (.trt) file.
        :param dtype: numpy dtype of the input batch.
        :param input_shape: full input shape, batch dimension included.
        :param output_shape: full output shape, batch dimension included.
        :param is_dynamic_shape: if True, the engine was built with a dynamic
            input profile and predict() binds the concrete shape each call.
        :param output_dtype: numpy dtype of the output; defaults to ``dtype``.
        """
        self.dtype = dtype
        self.input_shape = tuple(input_shape)
        self.output_shape = tuple(output_shape)
        self.is_dynamic_shape = is_dynamic_shape
        # Output dtype falls back to the input dtype unless given explicitly.
        if output_dtype is None:
            self.output_dtype = self.dtype
        else:
            self.output_dtype = output_dtype

        print('dtype', self.dtype)
        print('output dtype', self.output_dtype)
        print('input shape', self.input_shape)
        print('output shape', self.output_shape)
        print('Dynamic shape', self.is_dynamic_shape)

        self.runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
        with open(trt_path, 'rb') as f:
            self.engine = self.runtime.deserialize_cuda_engine(f.read())
        self.context = self.engine.create_execution_context()

        # Allocate the device buffers once; their byte sizes are computed
        # arithmetically (elements * itemsize) instead of allocating dummy
        # host arrays just to read .nbytes, as the original code did.
        in_nbytes = int(np.prod(self.input_shape)) * np.dtype(self.dtype).itemsize
        out_nbytes = int(np.prod(self.output_shape)) * np.dtype(self.output_dtype).itemsize
        self.d_input = cuda.mem_alloc(in_nbytes)
        self.d_output = cuda.mem_alloc(out_nbytes)
        # Binding order assumed to be [input, output] — matches how the
        # engine was exported; TODO confirm against the ONNX export script.
        self.bindings = [int(self.d_input), int(self.d_output)]
        self.stream = cuda.Stream()

    def predict(self, input):
        """Run one synchronous inference pass and return a new host array.

        :param input: numpy array matching the configured dtype and shape.
        :return: numpy array of ``output_shape`` / ``output_dtype``.
        """
        assert input.dtype == self.dtype, \
            'expected dtype %s, got %s' % (self.dtype, input.dtype)
        assert input.shape == self.input_shape, \
            'expected shape %s, got %s' % (self.input_shape, input.shape)
        # The async host->device copy requires contiguous host memory.
        input = np.ascontiguousarray(input)
        output = np.empty(self.output_shape, dtype=self.output_dtype)

        if self.is_dynamic_shape:
            # Dynamic profile: bind the concrete shape of the "input" tensor
            # for this execution context before launching.
            self.context.set_binding_shape(
                self.engine.get_binding_index("input"),
                self.input_shape
            )

        # Transfer input data to device
        cuda.memcpy_htod_async(self.d_input, input, self.stream)
        # Execute model
        self.context.execute_async_v2(self.bindings, self.stream.handle, None)
        # Transfer predictions back
        cuda.memcpy_dtoh_async(output, self.d_output, self.stream)
        # Synchronize: block until all work queued on the stream is done
        self.stream.synchronize()

        return output


if __name__ == '__main__':

    def _main():
        """Smoke test: push a few all-zero batches through a VGG16 engine."""
        trt_path = '/home/asuspei/PycharmProjects/AsusCondaP37Torch1101Cuda111/python_ai/category/onnx/torch2onnx/_save/vgg16_torch2onnx.py/v1.0/2022_06_17_12_54_40_476426-64.trt'
        BATCH_SIZE = 64
        N_CLASSES = 10
        PRECISION = np.float32
        batch = np.zeros((BATCH_SIZE, 224, 224, 3), dtype=PRECISION)

        helper = MyTrtHelper(trt_path, PRECISION, batch.shape, [BATCH_SIZE, N_CLASSES])
        for round_no in range(4):
            print(round_no)
            preds = helper.predict(batch)
            check_np(preds, 'predictions')

    _main()
