import pycuda.driver as cuda
import numpy as np
import tensorrt as trt
import os
from transformers import BertTokenizer
import pycuda.autoinit

os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # restrict this process to GPU 0


def init():  # 1. initialize the CUDA driver when a child process starts
    """Initialize the CUDA driver; intended as a worker-process initializer."""
    cuda.init()


class HostDeviceMem(object):
    """Bundle one host buffer with its matching device-side allocation."""

    def __init__(self, host_mem, device_mem):
        # Keep both ends of a host<->device transfer pair in one object.
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}"

    # repr is intentionally identical to str for easy debug printing.
    __repr__ = __str__


# Module-level TensorRT logger shared by the runtime below.
TRT_LOGGER = trt.Logger()


class TensorRTEngine(object):
    """Run inference with a serialized TensorRT engine on GPU 0.

    Owns a dedicated CUDA context that is pushed before and popped after
    every inference call, so instances can live in worker processes.
    """

    def __init__(self, engine_path, tokenizer_path):
        # 2. Create the CUDA context before the TRT engine is built.
        self.cfx = cuda.Device(0).make_context()
        self.load_engine(engine_path)

        self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=tokenizer_path)

        self.inputs, self.outputs, self.bindings = self.allocate_buffers()

    def __del__(self):
        # __init__ can fail partway (e.g. missing engine file leaves no
        # `stream`), so release only what actually exists instead of raising
        # AttributeError during garbage collection / interpreter shutdown.
        for name in ("inputs", "outputs", "stream"):
            if hasattr(self, name):
                delattr(self, name)
        cfx = getattr(self, "cfx", None)
        if cfx is not None:
            cfx.detach()  # 2. detach the CUDA context when the instance dies

    def load_engine(self, engine_path):
        """Deserialize the engine file and create execution context + stream.

        On a missing file this only prints a warning and returns, leaving the
        instance without `engine`/`context`/`stream` attributes (preserves the
        original best-effort behavior).
        """
        if not os.path.exists(engine_path):
            print(f"tensorRT model doesn't exist!")
            return
        print("Reading engine from file: {}".format(engine_path))
        with open(engine_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())  # deserialize
            self.context = self.engine.create_execution_context()
            self.stream = cuda.Stream()

    def allocate_buffers(self):
        """Allocate page-locked host memory and device memory per binding.

        Returns:
            (inputs, outputs, bindings): HostDeviceMem lists for input and
            output bindings, plus raw device addresses in binding order.
        """
        inputs, outputs, bindings = [], [], []
        for binding in self.engine:
            # size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size  # static shapes
            size = trt.volume(self.engine.get_binding_shape(binding))  # dynamic shapes
            # Dynamic dims are reported as -1, so the volume can be negative;
            # abs() avoids requesting a bogus allocation size (OOM).
            size = abs(size)
            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)  # pinned host buffer
            device_mem = cuda.mem_alloc(host_mem.nbytes)  # matching device buffer
            bindings.append(int(device_mem))  # device address used by the engine
            if self.engine.binding_is_input(binding):
                inputs.append(HostDeviceMem(host_mem, device_mem))
            else:
                outputs.append(HostDeviceMem(host_mem, device_mem))
        return inputs, outputs, bindings

    def do_inference(self, context, bindings, inputs, outputs, stream):
        """Copy inputs to the GPU, execute asynchronously, copy outputs back.

        Returns the list of host-side output arrays.
        """
        # Transfer input data to the GPU (plain loops: these calls are made
        # for their side effects, not their results).
        for inp in inputs:
            cuda.memcpy_htod_async(inp.device, inp.host, stream)
        # Run inference.
        context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
        # Transfer predictions back from the GPU.
        for out in outputs:
            cuda.memcpy_dtoh_async(out.host, out.device, stream)
        # Wait for all queued work before reading the host buffers.
        stream.synchronize()
        # Return only the host outputs.
        return [out.host for out in outputs]

    def __getids(self, title, tokenizer):
        """Build the two token-id sequences the model expects for a title."""
        sentence = f'{title}，它的意思是[MASK]。'
        sen_tmp = f'{title}，这句话的意思是[MASK]。'
        oriids = tokenizer.encode_plus(sentence)
        temids = tokenizer.encode_plus(sen_tmp)
        return oriids['input_ids'], temids['input_ids']

    def preprocess(self, title):
        """Tokenize `title` and stage both input sequences for inference."""
        oriids, temids = self.__getids(title, self.tokenizer)
        dummy_input1 = np.array(oriids, dtype=np.int32)
        dummy_input2 = np.array(temids, dtype=np.int32)
        # NOTE(review): assigning .host replaces the page-locked buffer from
        # allocate_buffers() with an ordinary ndarray, losing the pinned-memory
        # fast path for the async copy — confirm whether copying into the
        # existing buffer was intended.
        self.inputs[0].host = dummy_input1
        self.inputs[1].host = dummy_input2

        # Dynamic-shape engines need explicit binding shapes before execution.
        # NOTE(review): shapes are hard-coded to (1, 123) while the tokenized
        # inputs above are variable-length — verify padding/truncation to 123.
        idx = self.engine.get_binding_index('input_ids')
        idx1 = self.engine.get_binding_index('input_tem')
        self.context.set_binding_shape(idx, (1, 123))
        self.context.set_binding_shape(idx1, (1, 123))

    def postprocess(self, data):
        """Keep only the first output binding."""
        return data[0]

    def inference(self, title):
        """Run the full pipeline for `title` and return the first output."""
        self.preprocess(title)
        self.cfx.push()  # 3. make this instance's CUDA context current
        try:
            trt_outputs = self.do_inference(self.context, bindings=self.bindings,
                                            inputs=self.inputs,
                                            outputs=self.outputs,
                                            stream=self.stream)
            output = self.postprocess(trt_outputs)
        finally:
            # 3. Always pop, even if inference raised, so the per-thread
            # context stack stays balanced.
            self.cfx.pop()

        return output
