import os
import torch.nn as nn
import numpy as np

os.environ["CUDA_VISIBLE_DEVICES"] = '0'

import pycuda.driver as cuda
import pycuda.autoinit
import tensorrt as trt
from transformers import BertTokenizer

path = '/data0/jianyu10/prompt-bert/UseModel/prompt.trt'
model_path = '/data0/jianyu10/PTM/huggingface_model_cache/chinese-roberta-wwm-ext'
cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
cfx = cuda.Device(0).make_context()  # initialize the CUDA context before creating the TRT engine
# Default to None so a missing engine file can be detected with an explicit
# `engine is None` check instead of raising NameError at first use.
engine = None
context = None
if os.path.exists(path):
    print("Reading engine from file: {}".format(path))
    with open(path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())  # deserialize the serialized engine
        context = engine.create_execution_context()

tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext", cache_dir=model_path)


class TrtModel:
    """TensorRT inference wrapper for the prompt-BERT model.

    NOTE(review): the original file left ``self``-taking methods nested inside
    a plain ``def infer(title)`` while ``__main__`` instantiated an undefined
    ``TrtModel`` — the class header was evidently lost.  This class stitches
    the visible method bodies back together:

    - ``__getids`` / ``preprocess`` come from the tokenization code that was
      inlined at the top of the old ``infer``;
    - ``allocate_buffers`` (called by ``forward`` but missing) is rebuilt from
      the buffer-allocation code that followed it;
    - ``load_from_trt`` (referenced in ``forward``'s error message) is rebuilt
      from the module-level deserialization code;
    - ``__call__`` is added because ``__main__`` invokes the instance directly.
    """

    def __init__(self, path, model_path):
        """
        :param path: filesystem path to the serialized .trt engine
        :param model_path: HuggingFace cache dir for the tokenizer
        """
        # Reuse the module-level CUDA context created at import time so we do
        # not push a second context onto the same device.
        self.cfx = cfx
        self.tokenizer = BertTokenizer.from_pretrained(
            "hfl/chinese-roberta-wwm-ext", cache_dir=model_path)
        self.engine = None
        self.context = None
        self.loadtrtflag = False
        self.load_from_trt(path)

    def load_from_trt(self, path):
        """Deserialize the TensorRT engine at *path* and create its execution context."""
        if not os.path.exists(path):
            return
        print("Reading engine from file: {}".format(path))
        with open(path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
            self.context = self.engine.create_execution_context()
        self.loadtrtflag = True

    def __getids(self, title, tokenizer):
        """Wrap *title* in the two prompt templates and return both token-id lists."""
        sentence = f'{title}，它的意思是[MASK]。'
        sen_tmp = f'{title}，这句话的意思是[MASK]。'
        oriids = tokenizer.encode_plus(sentence)['input_ids']
        temids = tokenizer.encode_plus(sen_tmp)['input_ids']
        return oriids, temids

    def preprocess(self, title):
        """Tokenize *title* into the two int32 host arrays the engine expects."""
        oriids, temids = self.__getids(title, self.tokenizer)
        dummy_input1 = np.array(oriids, dtype=np.int32)
        dummy_input2 = np.array(temids, dtype=np.int32)
        return (dummy_input1, dummy_input2)

    def allocate_buffers(self, h_inputs):
        """Allocate device input buffers plus the pinned-host/device output pair.

        :returns: (d_inputs tuple, pagelocked h_output, d_output)
        """
        d_inputs = tuple(cuda.mem_alloc(h.nbytes) for h in h_inputs)
        # Dynamic-shape engines may report a negative volume (unknown dims);
        # abs() keeps the allocation size sane and avoids an OOM.
        size = abs(trt.volume(self.engine.get_binding_shape('output')))
        dtype = trt.nptype(self.engine.get_binding_dtype('output'))
        h_output = cuda.pagelocked_empty(size, dtype)
        d_output = cuda.mem_alloc(h_output.nbytes)
        return d_inputs, h_output, d_output

    def infer(self, d_inputs, h_inputs, d_output, h_output, stream):
        """Run one async inference: H2D copies, execute, D2H copy, stream sync."""
        self.cfx.push()
        try:
            for d_inp, h_inp in zip(d_inputs, h_inputs):
                cuda.memcpy_htod_async(d_inp, h_inp, stream)
            # execute_async_v2 because the network was built with an explicit batch.
            self.context.execute_async_v2(
                bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output)],
                stream_handle=stream.handle)
            cuda.memcpy_dtoh_async(h_output, d_output, stream)
            stream.synchronize()
        finally:
            # Always pop, even if execution raised, so the context stack stays balanced.
            self.cfx.pop()

    def forward(self, title):
        """Score *title* through the engine; returns the first output element, or None if no engine is loaded."""
        if not self.loadtrtflag:
            print('first of all you need load tensorRT model. [X.load_from_trt()] ')
            return
        h_inputs = self.preprocess(title)
        d_inputs, h_output, d_output = self.allocate_buffers(h_inputs)
        stream = cuda.Stream()
        self.infer(d_inputs, h_inputs, d_output, h_output, stream)
        output1 = h_output[0]
        return output1.tolist()

    def __call__(self, title):
        # __main__ calls the instance directly: trtmodel(ori)
        return self.forward(title)


if __name__ == '__main__':
    path = '/data0/jianyu10/prompt-bert/UseModel/prompt.trt'
    model_path = '/data0/jianyu10/PTM/huggingface_model_cache/chinese-roberta-wwm-ext'
    trtmodel = TrtModel(path, model_path)
    # Each entry is "original---similar", joined by the '---' separator;
    # only the original half is fed to the model below.
    titles = ['英国首相府违规聚会调查报告认为政府“领导不力”---调查报告认为政府“领导不力”',
              '在欢声笑语中展现新时代新征程上精气神---2022年春节联欢晚会——在欢声笑语中展现新时代新征程上精气神',
              '因发生斗殴事件致2名犯人死亡 美国联邦监狱进入封锁状态---美国休斯敦发生枪击事件 致1名警员死亡',
              '英媒：泽连斯基下令三年扩军十万 敦促议员不要散布恐慌---泽连斯基签署法令：未来三年内扩军十万人',
              '冬季风暴渐平息 美部分地区降雪超50厘米---冬季风暴持续影响美国大部分地区',
              '英媒：特朗普大举筹款瞄准2024总统大选---特朗普又夸下海口：若2024再当选总统，将赦免国会大厦骚乱者',
              '多国政要和国际组织官员贺新春 祝新年如虎添翼---视频｜多位国际组织负责人及国家政要送上新春祝福',
              '“美国‘超额死亡’人数已近百万”---华尔街日报：疫情下，美国“超额死亡”人数已近百万',
              '日本外相：驻日美军入境新冠检测所用方法未被日本认可有效---外媒：驻日美军人员被曝离美前未进行新冠检测',
              '除夕，布林肯又发新闻公报拜年：愿虎年给所有人带来机遇、成功和健康---美国务卿布林肯拜年：愿虎年给所有人带来机遇、成功和健康']
    for pair in titles:
        ori, sim = pair.split('---')
        print(trtmodel(ori))
