﻿#include <iostream>
#include <NvInfer.h>
#include <NvInferPlugin.h>
#include <NvOnnxConfig.h>
#include <NvOnnxParser.h>
#include <NvUffParser.h>
#include <NvInferRuntimeCommon.h>
#include <ostream>
#include <fstream>
#include <sstream>
#include <vector>


namespace trtonnx 
{
    using namespace nvinfer1;
    using namespace nvonnxparser;

    
    // Maximum number of engine bindings (and printed outputs) handled.
    // A typed, namespace-scoped constant instead of the original lowercase
    // macro `#define n 16`, which would token-replace every later `n`.
    constexpr int n = 16;

    // Minimal TensorRT logger: forwards every message except INFO-level
    // noise to stdout.
    class Logger : public nvinfer1::ILogger
    {
        void log(Severity severity, const char* msg) noexcept override
        {
            if (severity == Severity::kINFO)
                return; // suppress info-level messages
            std::cout << msg << std::endl;
        }
    };

    // Bundle of the objects needed to run inference on a deserialized engine.
    // NOTE(review): this struct is not used anywhere in the visible code —
    // presumably intended for callers that want to keep the engine alive
    // between invocations; confirm before removing.
    struct Config {
        ICudaEngine* config_engine;             // deserialized engine
        void** config_buffers;                  // per-binding device buffers
        std::vector<bool> config_bindings_is_input; // true where binding i is an input
        cudaStream_t config_stream;             // stream used for copies/enqueue
        IRuntime* config_runtime;               // runtime that owns the engine
        IExecutionContext* config_context;      // execution context for enqueue
    };

    void onnx2trt(const char* model_name, int workspace, int batchsize, bool verbose = true)
    {
        //https://zhuanlan.zhihu.com/p/322520579
        //init logger

        Logger gLogger;

        //create builder
        nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);

        //create network
        nvinfer1::INetworkDefinition* network = builder->createNetworkV2(1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
        builder->setMaxBatchSize(batchsize);
        //create parser
        nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger);
        // onnx 模型
        parser->parseFromFile(model_name, int(nvinfer1::ILogger::Severity::kWARNING));

        // 打印相关信息
        if (verbose)
        {
            for (int i = 0; i < parser->getNbErrors(); ++i)
            {
                std::cout << parser->getError(i)->desc() << std::endl;
            }
        }

        //create config
        nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
        config->setMaxWorkspaceSize(workspace);

        //create engine and save engine model,输出model.engine文件，用于tensorRT推理。 （buildCudaEngine的写法后面就不支持了）
        nvinfer1::ICudaEngine* engine1 = builder->buildEngineWithConfig(*network, *config);

        // serialize（序列化引擎用于保存）
        nvinfer1::IHostMemory* seriallizedModel = engine1->serialize();
        std::string serialize_str;
        std::ofstream serialize_output_stream;
        serialize_str.resize(seriallizedModel->size());
        memcpy((void*)serialize_str.data(), seriallizedModel->data(), seriallizedModel->size());
        serialize_output_stream.open("./model.engine");
        serialize_output_stream << serialize_str;
        serialize_output_stream.close();
        // save engine
        std::ofstream p("model.engine", std::ios::binary);
        if (!p)
        {
            std::cerr << "could not open plan output file" << std::endl;
        }
        p.write(reinterpret_cast<const char*>(seriallizedModel->data()), seriallizedModel->size());
    }


    // Number of elements in one sample of a binding: the product of all
    // dimensions after the batch dimension d[0]. Returns 1 when the binding
    // has rank 0 or 1.
    int get_features(Dims tmp)
    {
        int num_features = 1;
        // Iterate over the actual rank (nbDims), not MAX_DIMS: entries past
        // nbDims are unspecified and previously poisoned the product. The
        // original also had a dead `j == 0` branch (j starts at 1) and fell
        // off the end without returning — undefined behavior.
        for (int j = 1; j < tmp.nbDims; j++)
            num_features *= tmp.d[j];
        return num_features;
    }


    // Deserialize a TensorRT engine file, upload `value` to the binding at
    // `index`, run one inference, and print the first `batchsize` outputs.
    //
    // engine_name : path to the serialized .engine file
    // batchsize   : number of output floats to print (capped at n)
    // index       : binding index of the input tensor fed from `value`
    // value       : host buffer holding at least get_features(input dims) floats
    void load_engine(const char* engine_name, int batchsize, int index, float value[])
    {
        // reference: https://zhuanlan.zhihu.com/p/408220584
        Logger gLogger;

        // Read the whole plan file into memory. The original did not handle
        // open failure: tellg() would then return garbage used as a size.
        std::ifstream file(engine_name, std::ios::binary);
        if (!file.good())
        {
            std::cerr << "could not open engine file: " << engine_name << std::endl;
            return;
        }
        file.seekg(0, file.end);
        const auto size = file.tellg();
        file.seekg(0, file.beg);
        std::vector<char> trtModelStream(static_cast<size_t>(size));
        file.read(trtModelStream.data(), size);
        file.close();

        IRuntime* runtime = createInferRuntime(gLogger);
        ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream.data(), size);
        if (engine == nullptr)
        {
            std::cerr << "engine deserialization failed" << std::endl;
            runtime->destroy();
            return;
        }
        IExecutionContext* context = engine->createExecutionContext();

        // One device buffer per binding; at most n bindings are supported.
        void* buffers[n] = {};
        std::vector<bool> bindings_is_input;
        const int nbBindings = engine->getNbBindings();
        for (int i = 0; i < nbBindings && i < n; i++)
        {
            // getBindingIndex(getBindingName(i)) is i by construction; the
            // original recomputed it into a local that shadowed the `index`
            // parameter.
            const int num_features = get_features(engine->getBindingDimensions(i));
            cudaMalloc(&buffers[i], num_features * sizeof(float));
            bindings_is_input.push_back(engine->bindingIsInput(i));
        }

        cudaStream_t stream;
        cudaStreamCreate(&stream);

        // Upload the input, run inference (batch 1, as before), fetch outputs.
        cudaMemcpyAsync(buffers[index], value,
                        get_features(engine->getBindingDimensions(index)) * sizeof(float),
                        cudaMemcpyHostToDevice, stream);
        context->enqueue(1, buffers, stream, nullptr);

        float outputTensor[n];
        for (std::size_t i = 0; i < bindings_is_input.size(); i++)
        {
            if (!bindings_is_input[i])
            {
                // Clamp the copy to the host buffer capacity: the original
                // could overflow outputTensor when an output holds more than
                // n floats.
                int feats = get_features(engine->getBindingDimensions(static_cast<int>(i)));
                if (feats > n)
                    feats = n;
                cudaMemcpyAsync(outputTensor, buffers[i], feats * sizeof(float),
                                cudaMemcpyDeviceToHost, stream);
            }
        }

        // Block until the async copies and the enqueue have finished before
        // reading outputTensor on the host.
        cudaStreamSynchronize(stream);

        // Cap the print loop at n as well (reading past outputTensor when
        // batchsize > n was undefined behavior).
        for (int i = 0; i < batchsize && i < n; i++)
            std::cout << '\n' << outputTensor[i] << '\n' << std::endl;

        // Release device and TensorRT resources (originally all leaked).
        cudaStreamDestroy(stream);
        for (int i = 0; i < nbBindings && i < n; i++)
            cudaFree(buffers[i]);
        context->destroy();
        engine->destroy();
        runtime->destroy();
    }

}
// Demo driver: convert an ONNX model to a TensorRT engine, then load the
// engine and run it once on a hard-coded sample input.
int main()
{
    const bool verbose = false;
    const int batchsize = 16;
    const int workspace = 1 << 20;

    const char* model_name = "C:/Users/张宇杰/Desktop/TensorRT_C++/Project1/model.onnx";
    trtonnx::onnx2trt(model_name, workspace, batchsize, verbose);

    float value[] = { 1,1,3,5,5,5,5,1,5,1,1,1,3,5,5,5,5,1,5,1 };
    trtonnx::load_engine("C:/Users/张宇杰/Desktop/TensorRT_C++/Project1/Project1/model.engine",
                         batchsize, 0, value);

    std::cout << "运行结束" << std::endl;  // "run finished"
    std::cin.get();  // keep the console window open
}