#! -*-coding:utf-8 -*-

import ctypes
import tensorrt as trt
import numpy as np
import torch
from torch2trt import torch2trt
from torch2trt import TRTModule

# Import torch BEFORE loading the custom-built .so, otherwise dlopen fails with: OSError: libc10.so: cannot open shared object file: No such file or directory
import torch          
# from   common import *

# Paths to the shared libraries that hold the TensorRT plugin implementations.
nvinfer_dso = "/usr/lib/x86_64-linux-gnu/libnvinfer_plugin.so"     # TensorRT's official plugin library
plugin_dso  = "./build/libgelu.so"                                 # shared library built via CMakeLists.txt
# custom_infer_dll_file = "./build/lib.linux-x86_64-3.8/gelu_plugin.cpython-38-x86_64-linux-gnu.so"   # dynamic library produced by the PyTorch setup.py extension build

# Load both libraries with RTLD_GLOBAL so their symbols are visible to
# TensorRT when it resolves plugin creators later on.
nvinfer = ctypes.CDLL(nvinfer_dso, mode = ctypes.RTLD_GLOBAL)
pg      = ctypes.CDLL(plugin_dso,  mode = ctypes.RTLD_GLOBAL)
print('load nvinfer success!')
print('load customed plugin success!')   # fixed typo: "sucess" -> "success"

# TensorRT initialization.
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
trt.init_libnvinfer_plugins(TRT_LOGGER, "")      # register all of TensorRT's built-in and user-defined plugins
plg_registry = trt.get_plugin_registry()

# Look up the plugin creator by the name and version chosen in the C++
# source of this project; the relevant definitions there are:
"""
    static const char* GELE_PLUGIN_VERSION = "1.0";
    static const char* GELE_PLUGIN_NAME    = "CTyunGelu";
"""
plg_creator = plg_registry.get_plugin_creator("CTyunGelu", "1.0", "") 
# print(plg_creator)

# Field passed to the plugin creator; presumably "type_id" selects the
# plugin's data type (0 = float32) — TODO confirm against the C++ creator.
input_data = trt.PluginField("type_id", np.array([0], np.int32), trt.PluginFieldType.INT32)

# Collect the trt.PluginField entries declared above into one collection.
pfc = trt.PluginFieldCollection([input_data])

# Use the field collection to instantiate the actual TensorRT plugin layer.
gelu_layer = plg_creator.create_plugin("CTyunGelu", pfc)
# print(gelu_layer)

# Flag value for create_network(): explicit-batch network definition.
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def GiB(val):
    """Return *val* gibibytes expressed as a number of bytes.

    Used to size the TensorRT builder workspace below.
    """
    # Parenthesized for clarity: the original `val * 1 << 30` only worked
    # because `*` binds tighter than `<<`; this form states the intent.
    return val * (1 << 30)

# Build a minimal explicit-batch network whose only layer is the custom
# GELU plugin, then compile it into a TensorRT engine.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network:
    input_t = network.add_input(name="input", dtype=trt.float32, shape=(2,4))    # 2 is the batch size, 4 the number of elements
    inputs = [input_t]

    # Route the input through the plugin layer created above.
    emb_layer = network.add_plugin_v2(inputs, gelu_layer)                    # add our custom-defined layer to the network

    result = emb_layer.get_output(0)
    network.mark_output(tensor=result)                                      # mark the layer's output as a network output
    result.name = "output"

    config = builder.create_builder_config()                                # general builder configuration
    # NOTE(review): max_workspace_size and build_engine are deprecated in
    # TensorRT >= 8.x (set_memory_pool_limit / build_serialized_network) —
    # fine for older releases; confirm against the installed version.
    config.max_workspace_size =  GiB(2)
    engine = builder.build_engine(network, config)

    # Flip to True to serialize the built engine to disk for reuse.
    if False:
        serialized_engine = engine.serialize()
        with open('kwaigelu.engine', 'wb') as fout:
            fout.write(serialized_engine)
        print('engine serialized')

# Run the engine on random CUDA input via torch2trt's TRTModule wrapper,
# which binds the named engine I/O tensors to torch tensors.
x       = torch.randn((2, 4), dtype=torch.float32).cuda()
model   = TRTModule(engine=engine, 
                    input_names=['input'], 
                    output_names=['output'])
print(model(x))

# Naive reference implementation of GELU (tanh approximation).
def gelu(x):
    """Return the tanh-approximation GELU of array *x*, element-wise."""
    scale = np.sqrt(2.0 / np.pi)
    inner = scale * (x + 0.044715 * x ** 3)
    return 0.5 * x * (1.0 + np.tanh(inner))

# Print the naive NumPy result for the same input to eyeball-compare it
# against the plugin's output printed above.
print(gelu(x.cpu().numpy()))