import os
import torch
import torch.nn as nn
import numpy as np

# Restrict the process to physical GPU 0. This must happen BEFORE the
# pycuda/tensorrt imports below so they only ever see that one device.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
from transformers import BertTokenizer

# NOTE(review): with CUDA_VISIBLE_DEVICES='0' only one device is visible,
# so 'cuda:0' is the only valid choice here.
torch.cuda.set_device('cuda:0')
# Verbose logger shared by every TensorRT runtime created in this module.
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)


class HostDeviceMem(object):
    """Pairs a host (CPU) buffer with its matching device (GPU) allocation."""

    def __init__(self, host_mem, device_mem):
        """
        host_mem: cpu memory
        device_mem: gpu memory
        """
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        # Same textual layout as always: host buffer first, device handle second.
        return f"Host:\n{self.host}\nDevice:\n{self.device}"

    def __repr__(self):
        return str(self)


class TrtModel(nn.Module):
    """TensorRT inference wrapper for a BERT-style masked-LM model.

    Deserializes a TensorRT engine, allocates pinned host / device I/O
    buffers, and exposes ``forward(title)`` which tokenizes ``title`` into
    two prompt variants, runs the engine, and returns the first output
    binding as a Python list.
    """

    def __init__(self, enginepath, tokenizerpath):
        """
        enginepath: path to the serialized TensorRT engine file
        tokenizerpath: cache directory for the pretrained BERT tokenizer
        """
        super(TrtModel, self).__init__()
        self.loadtrtflag = False
        cuda.init()
        # A CUDA context must exist before any TRT engine work is done.
        self.cfx = cuda.Device(0).make_context()
        self.load_from_trt(enginepath)
        self.tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=tokenizerpath)

    def load_from_trt(self, engine_file_path):
        """Deserialize the engine at ``engine_file_path`` and allocate buffers.

        Returns silently (leaving ``loadtrtflag`` False) when the file does
        not exist, so ``forward`` can refuse to run later.
        """
        if not os.path.exists(engine_file_path):
            print("tensorRT model doesn't exist!")
            return
        print("Reading engine from file: {}".format(engine_file_path))
        with open(engine_file_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())  # deserialize
            self.context = self.engine.create_execution_context()
            self.inputs, self.outputs, self.bindings = self.allocate_buffers()
            self.loadtrtflag = True

    def __del__(self):
        # Guard every attribute: if load_from_trt() bailed out early (missing
        # engine file) the buffers and stream were never created, and an
        # unguarded `del self.stream` would raise AttributeError here.
        for attr in ('inputs', 'outputs', 'stream'):
            if hasattr(self, attr):
                delattr(self, attr)
        # The CUDA context must be detached when the instance is released.
        if hasattr(self, 'cfx'):
            self.cfx.detach()

    def allocate_buffers(self):
        """Allocate pinned-host + device buffers for every engine binding.

        Returns (inputs, outputs, bindings): inputs/outputs are
        HostDeviceMem pairs, bindings are the raw device addresses the
        engine executes against. Also creates ``self.stream``.
        """
        inputs, outputs, bindings = [], [], []
        self.stream = cuda.Stream()
        for binding in self.engine:
            # Dynamic-shape engines report -1 dims, which makes trt.volume()
            # negative and would cause a bad/huge allocation — take abs().
            size = abs(trt.volume(self.engine.get_binding_shape(binding)))
            dtype = trt.nptype(self.engine.get_binding_dtype(binding))
            host_mem = cuda.pagelocked_empty(size, dtype)  # page-locked host memory
            device_mem = cuda.mem_alloc(host_mem.nbytes)   # device-side allocation
            bindings.append(int(device_mem))  # buffer address used by the engine
            if self.engine.binding_is_input(binding):
                inputs.append(HostDeviceMem(host_mem, device_mem))
            else:
                outputs.append(HostDeviceMem(host_mem, device_mem))
        return inputs, outputs, bindings

    def inference(self, context, bindings, inputs, outputs, stream):
        """Run one async inference pass; return the host-side output arrays."""
        self.cfx.push()
        try:
            # Transfer input data from CPU to GPU.
            for inp in inputs:
                cuda.memcpy_htod_async(inp.device, inp.host, stream)
            # execute_async_v2 is for explicit-batch networks
            # (implicit-batch networks would use execute_async instead).
            context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
            # Transfer predictions back from GPU to CPU.
            for out in outputs:
                cuda.memcpy_dtoh_async(out.host, out.device, stream)
            stream.synchronize()
        finally:
            # Always pop the context, even if execution raised, so the
            # CUDA context stack stays balanced.
            self.cfx.pop()
        # Return only the host-side outputs.
        return [out.host for out in outputs]

    def __getids(self, title, tokenizer):
        """Build the two prompt variants for ``title`` and return their input_ids."""
        sentence = f'{title}，它的意思是[MASK]。'
        sen_tmp = f'{title}，这句话的意思是[MASK]。'
        oriids = tokenizer.encode_plus(sentence)
        temids = tokenizer.encode_plus(sen_tmp)
        return oriids['input_ids'], temids['input_ids']

    def input2cpu(self, titleids, simtitleids):
        """Stage token ids into the pinned input buffers and set binding shapes."""
        self.inputs[0].host = np.array(titleids, dtype=np.int32)
        self.inputs[1].host = np.array(simtitleids, dtype=np.int32)
        # Dynamic-input engines require explicit binding shapes before execution.
        # NOTE(review): sequence length is hard-coded to (1, 123) — confirm this
        # matches the engine's optimization profile and the tokenizer output.
        idx = self.engine.get_binding_index('input_ids')
        idx1 = self.engine.get_binding_index('input_tem')
        self.context.set_binding_shape(idx, (1, 123))
        self.context.set_binding_shape(idx1, (1, 123))

    def preprocess(self, title):
        """Tokenize ``title`` and load the resulting ids into the input buffers."""
        oriids, temids = self.__getids(title, self.tokenizer)
        self.input2cpu(oriids, temids)

    def forward(self, title):
        """Full pipeline: preprocess ``title``, run the engine, return output[0].

        Returns None (after printing a warning) when no engine was loaded.
        """
        if not self.loadtrtflag:
            print('first of all you need load tensorRT model. [X.load_from_trt()] ')
            return
        self.preprocess(title)
        trt_outputs = self.inference(self.context, bindings=self.bindings,
                                     inputs=self.inputs, outputs=self.outputs,
                                     stream=self.stream)
        return trt_outputs[0].tolist()


if __name__ == '__main__':
    pass
