import torch
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
import numpy as np


def get_workspace_size(workspace):
    '''
    Parse a human-readable size string into a byte count.

    :param workspace(str): size to allocate, e.g. '2.5G' or '30M'
    :return: size in bytes (int), or None if the string cannot be parsed
    '''
    # Unit suffix -> byte multiplier; checked in declaration order.
    units = (('G', 1 << 30), ('M', 1 << 20))
    for suffix, factor in units:
        if suffix in workspace:
            try:
                return int(factor * float(workspace[:workspace.index(suffix)]))
            except ValueError:
                # Narrowed from a bare except: only float() can fail here.
                print('错误： 输入的 workspace 参数不能被识别')
                return None
    # No recognized unit suffix: report it instead of silently returning None.
    print('错误： 输入的 workspace 参数不能被识别')
    return None


def onnx2trt(model_file,workspace,show_detail=False,engine_file='',max_batch_size = 1):
    '''
    Build a serialized TensorRT engine from an ONNX model.

    :param model_file(str): path to the ONNX file
    :param workspace(str): maximum builder workspace, e.g. '2.5G', '3M'
    :param show_detail(bool): print parser error details when parsing fails
    :param engine_file(str): path to save the serialized engine ('' = do not save)
    :param max_batch_size(int): maximum batch size for inference
    :return: serialized engine (bytes-like host memory), or None on failure
    '''
    # Create logger, builder, network and parser.
    logger = trt.Logger(trt.Logger.VERBOSE)
    builder = trt.Builder(logger)
    # Flag bit 1 == NetworkDefinitionCreationFlag.EXPLICIT_BATCH.
    network = builder.create_network(1)
    parser = trt.OnnxParser(network,logger)

    # Parse the model.
    success = parser.parse_from_file(model_file)
    if not success:
        print('错误： Onnx模型解析失败')
        # Bug fix: parser errors exist only after a FAILED parse, but the
        # original returned before it could ever print them.
        if show_detail:
            for idx in range(parser.num_errors):
                print(parser.get_error(idx))
        return None

    # Configure the builder.
    builder.max_batch_size = max_batch_size  # NOTE(review): ignored for explicit-batch networks; kept for compatibility
    config = builder.create_builder_config()
    config.max_workspace_size = get_workspace_size(workspace)

    # Build and serialize the engine.
    engine = None  # bug fix: 'engine' was unbound at the return when the build raised
    try:
        engine = builder.build_serialized_network(network,config)
    except Exception:
        print('错误： 引擎序列化失败')
        return None
    if engine is None:
        # build_serialized_network signals failure by returning None, not raising.
        print('错误： 引擎序列化失败')
        return None

    if engine_file:
        try:
            with open(engine_file, 'wb') as f:  # persist the serialized engine
                f.write(engine)
            print(engine_file,'保存成功！')
        except OSError:
            print('错误： 序列化模型保存失败')
    return engine


class TRTensor():
    """Pairs a host buffer with its corresponding device (GPU) allocation."""

    def __init__(self, host_mem, device_mem):
        # Keep both ends of a host<->device transfer together in one object.
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n{}\nDevice:\n{}".format(self.host, self.device)

    def __repr__(self):
        return self.__str__()


class MyLogger(trt.ILogger):
    """Silent TensorRT logger: discards every message it receives."""

    def __init__(self):
        super().__init__()

    def log(self, severity, msg):
        # Intentionally drop all output; plug a custom implementation in here.
        pass


def infer_init(engine_file):
    """Deserialize a TensorRT engine and allocate host/device buffers for every binding.

    :param engine_file: path (str) to a serialized engine file, or an in-memory
        serialized engine (bytes-like), e.g. the return value of onnx2trt
    :return: two lists:
        [logger, runtime, engine, context, stream] - runtime objects that must
            stay alive for the lifetime of inference;
        [inputs, outputs, bindings] - TRTensor host/device pairs plus the raw
            device-pointer list expected by context.execute_async_v2
    """
    logger = MyLogger()
    #logger = trt.Logger(trt.Logger.VERBOSE)
    runtime = trt.Runtime(logger)
    # First, load the serialized engine (read it from disk if given a path).
    if isinstance(engine_file,str):
        with open(engine_file, 'rb') as f:
            serialized_engine = f.read()
        # Then deserialize the engine from the in-memory buffer.
        engine = runtime.deserialize_cuda_engine(serialized_engine)
    else:
        engine = runtime.deserialize_cuda_engine(engine_file)
    # The engine holds the optimized model, but inference needs additional
    # intermediate activation state, provided by an execution context.
    context = engine.create_execution_context()
    # One engine may own several execution contexts, allowing one set of weights
    # to serve multiple overlapping inference tasks. (Current exception: with
    # dynamic shapes, each optimization profile can have only one context.)
    # Inference requires TensorRT buffers for inputs and outputs, passed as a
    # list of GPU pointers; iterate the engine's bindings to allocate each slot.
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size  # element count to allocate (dtype not yet applied)
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)  # actual allocation (dtype applied here)
        device_mem = cuda.mem_alloc(host_mem.nbytes)  # keep the raw device address
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):  # input binding
            inputs.append(TRTensor(host_mem, device_mem))
        else:  # output binding
            outputs.append(TRTensor(host_mem, device_mem))
    return [logger,runtime,engine,context,stream],[inputs,outputs,bindings]

def infer_setinput(list2, index, values):
    '''
    Copy input values into the host buffer of the selected input tensor.

    :param list2: [inputs, outputs, bindings] as returned by infer_init
    :param index(int): which input tensor to fill
    :param values: array-like data; flattened into the start of the host buffer
    :return: the same [inputs, outputs, bindings] list
    '''
    [inputs, outputs, bindings] = list2
    # Convert once instead of three times; arr.size equals
    # trt.volume(arr.shape) (the product of the dimensions), so the
    # TensorRT call is unnecessary here.
    arr = np.asarray(values).reshape(-1)
    inputs[index].host[:arr.size] = arr
    return [inputs, outputs, bindings]

def infer_inference(list1, list2):
    '''
    Run one inference pass: upload inputs, execute, download outputs.

    :param list1: [logger, runtime, engine, context, stream] from infer_init
    :param list2: [inputs, outputs, bindings] from infer_init
    :return: the list of output TRTensor objects (results land in each .host)
    '''
    [logger, runtime, engine, context, stream], [inputs, outputs, bindings] = list1, list2
    # Transfer input data to the GPU. (Plain for-loops: the original used
    # list comprehensions purely for their side effects.)
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # Run inference.
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    # Synchronize the stream so the host buffers are safe to read.
    # (A dead '[out.host for out in outputs]' expression was removed.)
    stream.synchronize()
    return outputs



#Debug
if __name__ == '__main__':
    # Build a serialized engine from the ONNX model (also saved to disk).
    engine = onnx2trt(model_file = 'model.onnx', workspace='30M', show_detail=False, engine_file='model.engine', max_batch_size=1)
    # Initialize inference directly from the in-memory engine. (A dead
    # engine_file = 'model.engine' assignment, immediately overwritten,
    # was removed.)
    list1, list2 = infer_init(engine)
    list2 = infer_setinput(list2, 0, [12, 3, 1, 2, 5, 47, 8])
    infer_inference(list1, list2)