

#include "NvInferRuntimeCommon.h"
#include "NvInfer.h"
#include "NvOnnxParser.h"

#include "cuda_runtime_api.h"

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <assert.h>
#include <numeric>
#include <functional>

#include "MyLogger.h"

using namespace std;

#define ONNX_FILE_NAMESTR "F:/dnn-prj/moduleparam/testonnx.onnx"
#define INPUT1_NAME "input1"
#define INPUT2_NAME "input2"
#define OUTPUT1_NAME "output1"
#define OUTPUT2_NAME "output2"

MyLogger glogger;

// Abort on any non-zero status from a CUDA runtime call, reporting the
// failing location so the first failure is easy to pinpoint.
// Usage: CHECK(cudaMalloc(&p, bytes)); — the status expression is evaluated
// exactly once.
#define CHECK(status)                                                                                                  \
    do                                                                                                                 \
    {                                                                                                                  \
        auto ret = (status);                                                                                           \
        if (ret != 0)                                                                                                  \
        {                                                                                                              \
            std::cerr << "Cuda failure: " << ret << " at " << __FILE__ << ":" << __LINE__ << std::endl;                \
            abort();                                                                                                   \
        }                                                                                                              \
    } while (0)

// If the condition `status` is false, print `errMsg` plus the failing
// file/function/line and return `val` from the enclosing function.
// Fix: the original referenced FN_NAME, which is not defined anywhere in
// this file (it comes from the TensorRT samples' common.h, where it is
// defined as __func__); use the standard __func__ directly so the macro
// compiles on its own.
#define CHECK_RETURN_W_MSG(status, val, errMsg)                                                                        \
    do                                                                                                                 \
    {                                                                                                                  \
        if (!(status))                                                                                                 \
        {                                                                                                              \
            std::cerr << errMsg << " Error in " << __FILE__ << ", function " << __func__ << "(), line " << __LINE__    \
                      << std::endl;                                                                                    \
            return val;                                                                                                \
        }                                                                                                              \
    } while (0)

// Convenience form with an empty message.
#define CHECK_RETURN(status, val) CHECK_RETURN_W_MSG(status, val, "")

// Assign a default dynamic range to every tensor in the network that does
// not already have one, so INT8 mode can run without calibration data.
// Layer inputs get [-inScales, inScales]; layer outputs get
// [-outScales, outScales], except pooling outputs, which must keep the same
// scale as their inputs.
inline void setAllTensorScales(nvinfer1::INetworkDefinition* network, float inScales = 2.0f, float outScales = 4.0f)
{
    const int layerCount = network->getNbLayers();

    // Pass 1: give every layer input a scale. Optional inputs (from RNN
    // layers) appear as nullptr and are skipped.
    for (int layerIdx = 0; layerIdx < layerCount; ++layerIdx)
    {
        auto* layer = network->getLayer(layerIdx);
        const int inputCount = layer->getNbInputs();
        for (int t = 0; t < inputCount; ++t)
        {
            nvinfer1::ITensor* tensor = layer->getInput(t);
            if (tensor == nullptr || tensor->dynamicRangeIsSet())
            {
                continue;
            }
            tensor->setDynamicRange(-inScales, inScales);
        }
    }

    // Pass 2: give every layer output a scale. Outputs that also feed other
    // layers already received a scale in pass 1 and are left untouched.
    for (int layerIdx = 0; layerIdx < layerCount; ++layerIdx)
    {
        auto* layer = network->getLayer(layerIdx);
        const int outputCount = layer->getNbOutputs();
        for (int t = 0; t < outputCount; ++t)
        {
            nvinfer1::ITensor* tensor = layer->getOutput(t);
            if (tensor == nullptr || tensor->dynamicRangeIsSet())
            {
                continue;
            }
            // Pooling layers must have identical input and output scales.
            const bool isPooling = layer->getType() == nvinfer1::LayerType::kPOOLING;
            const float s = isPooling ? inScales : outScales;
            tensor->setDynamicRange(-s, s);
        }
    }
}

// Configure the builder/config to run on a DLA (Deep Learning Accelerator)
// core. A negative useDLACore is a no-op (run on GPU). DLA does not permit
// FP32, so FP16 is forced unless INT8 has been requested.
inline void enableDLA(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config, int useDLACore, bool allowGPUFallback = true)
{
    if (useDLACore < 0)
    {
        return; // DLA not requested.
    }
    if (builder->getNbDLACores() == 0)
    {
        std::cerr << "Trying to use DLA core " << useDLACore << " on a platform that doesn't have any DLA cores"
                  << std::endl;
        assert("Error: use DLA core on a platfrom that doesn't have any DLA cores" && false);
        // Fix: in release builds (NDEBUG) the assert above is compiled out,
        // and the original fell through and configured DLA on hardware that
        // has none. Bail out explicitly instead.
        return;
    }
    if (allowGPUFallback)
    {
        // Let layers that cannot run on the DLA fall back to the GPU.
        config->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
    }
    if (!builder->getInt8Mode() && !config->getFlag(nvinfer1::BuilderFlag::kINT8))
    {
        // User has not requested INT8 Mode.
        // By default run in FP16 mode. FP32 mode is not permitted.
        builder->setFp16Mode(true);
        config->setFlag(nvinfer1::BuilderFlag::kFP16);
    }
    config->setDefaultDeviceType(nvinfer1::DeviceType::kDLA);
    config->setDLACore(useDLACore);
    config->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES); // enforce strict type constraints
}


// Entry point: builds a TensorRT engine from an ONNX model that exposes a
// static-shape pair (input1/output1) and a dynamic-shape pair
// (input2/output2), then runs one inference with all-ones dummy data.
int main()
{   
    glogger.log(MyLogger::Severity::kINFO,"My First TRT c++.");
//    cout << "Hello World!" << endl;

    // Create the builder.
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(glogger);

    // Create the network definition with an explicit batch dimension.
    const auto explicitBatch =
            1U << static_cast<uint32_t>(
                      nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH
                      );    // kEXPLICIT_BATCH makes the batch part of every tensor shape
                        // (required for dynamic shapes); kEXPLICIT_PRECISION
                        // could be OR-ed in as another flag bit.

        // createNetworkV2(0U) behaves like the legacy createNetwork().

    nvinfer1::INetworkDefinition* networkdef = builder->createNetworkV2(explicitBatch);

    // Create the builder configuration.
    nvinfer1::IBuilderConfig* buildcfg = builder->createBuilderConfig();

    // Create the ONNX parser bound to the network definition.
    nvonnxparser::IParser* parser = nvonnxparser::createParser(*networkdef, glogger);

    // Parse the ONNX model file; parser messages at kINFO and above are logged.
    bool parseOk = parser->parseFromFile(ONNX_FILE_NAMESTR,static_cast<int>(MyLogger::Severity::kINFO));

    if(!parseOk){
        glogger.log(MyLogger::Severity::kERROR,"parse ONNX file failed!");
    }
    // NOTE(review): on parse failure execution continues and builds anyway;
    // an early return here would fail faster and more clearly.

    builder->setMaxBatchSize(16);   // max batch size (implicit-batch setting; shapes here are explicit)
    buildcfg->setMaxWorkspaceSize(4*1024*1024*1024LL); // max builder scratch memory: 4 GiB

    bool hasFastFp16 = builder->platformHasFastFp16();
    if(hasFastFp16){
        cout<<"Support FP16 :Yes"<<endl;
    }else{
        cout<<"Support FP16 :No"<<endl;
    }

    if(hasFastFp16){      // opt into half precision when the GPU has fast FP16
        buildcfg->setFlag(nvinfer1::BuilderFlag::kFP16);    // allow FP16 (builder may still pick FP32 per layer)
        buildcfg->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES);    // force the requested precision
    }

    cout<<"Support INT8 : "<<(builder->platformHasFastInt8()?"Yes":"No")<<endl;
    if(false){      // INT8 path (disabled): float32 values scaled down to integers
        buildcfg->setFlag(nvinfer1::BuilderFlag::kINT8);
        setAllTensorScales(networkdef, 127.0f, 127.0f);     // give every tensor a dynamic range
    }

    enableDLA(builder,
              buildcfg,
              -1     // DLA core index; >=0 selects that DLA core, negative disables DLA
              );

    auto ddd = networkdef->getInput(0)->getDimensions();    // NOTE(review): unused local — debugger aid? consider removing
//    networkdef->addInput("input1",nvinfer1::DataType::kFLOAT,nvinfer1::Dims4(-1,3,-1,-1));
//    networkdef->addInput("input2",nvinfer1::DataType::kFLOAT,nvinfer1::Dims4(-1,4,-1,-1));
    //context.setBindingDimensions(0, Dims3(3, 150, 250))
    //engine.getBindingDimensions(0) returns a Dims with dimensions {3, -1, -1}

    // Optimization profile for the dynamic-shape input; it is selected at
    // runtime via context.setOptimizationProfile(0).
    nvinfer1::IOptimizationProfile* profile = builder->createOptimizationProfile();
//    profile->setDimensions("input1", nvinfer1::OptProfileSelector::kMIN, nvinfer1::Dims4(1,3,128,128));
//    profile->setDimensions("input1", nvinfer1::OptProfileSelector::kOPT, nvinfer1::Dims4(1,3,512,512));
//    profile->setDimensions("input1", nvinfer1::OptProfileSelector::kMAX, nvinfer1::Dims4(2,3,1024,1024));

    // setDimensions returns a bool (streamed as 1/0) telling whether the
    // min/opt/max dims were accepted for "input2".
    cout<<"set dims kMIN :" <<profile->setDimensions("input2", nvinfer1::OptProfileSelector::kMIN, nvinfer1::Dims4(1,4,64,64));
    cout<<endl;
    cout<<"set dims kOPT :" <<profile->setDimensions("input2", nvinfer1::OptProfileSelector::kOPT, nvinfer1::Dims4(2,4,512,512));
    cout<<endl;
    cout<<"set dims kMAX :" <<profile->setDimensions("input2", nvinfer1::OptProfileSelector::kMAX, nvinfer1::Dims4(4,4,1024,1024));


    // A profile is mandatory when any input/output has dynamic dimensions.
    // Multiple profiles may be added; the first added has index 0, and the
    // returned index can later be selected on the execution context.
    buildcfg->addOptimizationProfile(profile);

    // Compile/build the engine.
    nvinfer1::ICudaEngine* engine = builder->buildEngineWithConfig(*networkdef,*buildcfg);


    // Fetch input/output dimensions from the network definition.
    assert(networkdef->getNbInputs() >= 2);
    nvinfer1::Dims mInput1Dims = networkdef->getInput(0)->getDimensions();
    nvinfer1::Dims mInput2Dims = networkdef->getInput(1)->getDimensions();

    assert(networkdef->getNbOutputs() >= 2);
    nvinfer1::Dims mOutput1Dims = networkdef->getOutput(0)->getDimensions();
    nvinfer1::Dims mOutput2Dims = networkdef->getOutput(1)->getDimensions();

    if(engine){
        // Number of binding slots (inputs + outputs).
        // nvinfer1::IExecutionContext also has getNbBindings.
        int cnt = engine->getNbBindings();
        assert(cnt>=4);

        size_t bindingBufVol[4];    // element count per binding; recomputed below for dynamic shapes

        for (int i = 0; i < cnt; ++i) {
            nvinfer1::Dims dims = engine->getBindingDimensions(i);    // dimension struct of this binding
            size_t vol = 1; // running element count, initialized to 1 (think batch size 1)
            nvinfer1::DataType type = engine->getBindingDataType(i);    // element type of the binding
            assert(type == nvinfer1::DataType::kFLOAT);
            int vecDim = engine->getBindingVectorizedDim(i);    // which dimension (if any) is vectorized
            if (-1 != vecDim) // i.e., 0 != lgScalarsPerVector — a vectorized dimension exists
            {
                int scalarsPerVec = engine->getBindingComponentsPerElement(i);  // scalars packed per vector element
                // The vectorized dimension counts vectors, not scalars, so the
                // raw extent is divided by scalarsPerVec with ceiling division,
                // and the per-vector scalar count is folded into vol instead.
                dims.d[vecDim] = (dims.d[vecDim] + scalarsPerVec - 1)/scalarsPerVec;
                vol *= scalarsPerVec;
            }

            // std::accumulate over [dims.d, dims.d + nbDims) with initial value 1
            // and std::multiplies: the product of all dimension extents.
            vol *= std::accumulate(dims.d, dims.d + dims.nbDims, 1, std::multiplies<int64_t>());
            bindingBufVol[i] = vol;
            cout<<"vol["<<i<<"]= "<<vol<<endl;
        }

        auto type = networkdef->getLayer(0)->getPrecision();    // NOTE(review): unused local — consider removing

        ///////////////////////// Allocate buffers and prepare the computation /////////////////////////
        const int infer_BatchSize = 4;  // batch size used for this inference run

        int input1Index = engine->getBindingIndex(INPUT1_NAME);
        int input2Index = engine->getBindingIndex(INPUT2_NAME);
        int output1Index = engine->getBindingIndex(OUTPUT1_NAME);
        int output2Index = engine->getBindingIndex(OUTPUT2_NAME);

        // Inputs/outputs must be float32.
        // input1's dimensions must all be static here.
        nvinfer1::Dims dims = engine->getBindingDimensions(input1Index);
        assert(dims.nbDims==4);
        assert(dims.d[0]>0);
        assert(dims.d[1]>0);
        assert(dims.d[2]>0);
        assert(dims.d[3]>0);

        // NOTE(review): input1's batch is static in the model (asserted >0
        // above) yet the buffer is sized with infer_BatchSize — confirm the
        // two actually match, or use dims.d[0] instead.
        int input1batch = infer_BatchSize;//dims.d[0]; //batchsize
        bindingBufVol[0] = input1batch;
        bindingBufVol[0] = bindingBufVol[0]*dims.d[1]*dims.d[2]*dims.d[3];
        // NOTE(review): bindingBufVol[*]*4 is used below as a BYTE count for
        // cudaMalloc/cudaMemcpy, but new float[vol*4] allocates vol*4 FLOATS —
        // 4x more host memory than needed (harmless over-allocation; applies
        // to all four host buffers below).
        float *hostInput1Data = new float[bindingBufVol[0]*4];
        for (int i = 0; i < bindingBufVol[0]*4; ++i) {
            hostInput1Data[i] = 1.0f;
        }

        // output1: element type must be float32, all dimensions static.
        dims = engine->getBindingDimensions(output1Index);
        assert(dims.nbDims==4);
        assert(dims.d[0]>0);
        assert(dims.d[1]>0);
        assert(dims.d[2]>0);
        assert(dims.d[3]>0);

        bindingBufVol[2] = input1batch;
        bindingBufVol[2] = bindingBufVol[2]*dims.d[1]*dims.d[2]*dims.d[3];
        float *hostOutput1Data = new float[bindingBufVol[2]*4];

        // input2: dimensions 0, 2, 3 must be dynamic (-1 in the engine).
//        int input2batch = 4,input2w = 128,input2h = 128;
        nvinfer1::Dims input2_dims = engine->getBindingDimensions(input2Index);
        assert(input2_dims.nbDims==4);
        assert(input2_dims.d[0]==-1);
        assert(input2_dims.d[1]>0);
        assert(input2_dims.d[2]==-1);
        assert(input2_dims.d[3]==-1);

        // Choose concrete runtime shapes within the optimization profile's
        // [kMIN, kMAX] range set above.
        int input2batch = infer_BatchSize;    // batch size
        input2_dims.d[0] = input2batch;
        input2_dims.d[2] = 64;    // width
        input2_dims.d[3] = 64;     // height
        bindingBufVol[1] = input2batch;
        bindingBufVol[1] = bindingBufVol[1] * input2_dims.d[1] * input2_dims.d[2] * input2_dims.d[3];
        float *hostInput2Data = new float[bindingBufVol[1]*4];
        for (int i = 0; i < bindingBufVol[1]*4; ++i) {
            hostInput2Data[i] = 1.0f;
        }

        // output2: element type must be float32; dims 0, 2, 3 are dynamic and
        // mirror the shape chosen for input2.
        dims = engine->getBindingDimensions(output2Index);
        assert(dims.nbDims==4);
        assert(dims.d[0]==-1);
        assert(dims.d[1]>0);
        assert(dims.d[2]==-1);
        assert(dims.d[3]==-1);

        dims.d[0] = input2_dims.d[0];
        dims.d[2] = input2_dims.d[2];
        dims.d[3] = input2_dims.d[3];
        bindingBufVol[3] = input2batch;
        bindingBufVol[3] = bindingBufVol[3]*dims.d[1]*dims.d[2]*dims.d[3];
        float *hostOutput2Data = new float[bindingBufVol[3]*4];

        //////// Allocate device memory: vol elements * 4 bytes per float.
        void* cudaBuffers[4]={nullptr};
        for (int i = 0; i < 4; ++i) {
            CHECK(cudaMalloc(&cudaBuffers[i],bindingBufVol[i]*4));
        }

        // Create the CUDA stream and the TensorRT execution context.
        cudaStream_t stream;
        CHECK(cudaStreamCreate(&stream));
        nvinfer1::IExecutionContext* context = engine->createExecutionContext();
        assert(context != nullptr);

        // Copy host input buffers to device memory (async on the stream).
        CHECK(cudaMemcpyAsync(cudaBuffers[0],
                              hostInput1Data,
                              bindingBufVol[0] * 4,
                              cudaMemcpyHostToDevice, stream));

        CHECK(cudaMemcpyAsync(cudaBuffers[1],
                              hostInput2Data,
                              bindingBufVol[1] * 4,
                              cudaMemcpyHostToDevice, stream));

        // Fix input2's concrete shape for this run, then enqueue inference.
        context->setBindingDimensions(input2Index,input2_dims);
        // NOTE(review): explicit-batch engines should use enqueueV2(); the
        // batch argument of enqueue() belongs to the implicit-batch API —
        // confirm against the TensorRT version in use.
        context->enqueue(4, cudaBuffers, stream, nullptr); // run inference

        CHECK(cudaMemcpyAsync(hostOutput1Data,
                              cudaBuffers[2],
                              bindingBufVol[2] * 4,
                              cudaMemcpyDeviceToHost, stream));

        CHECK(cudaMemcpyAsync(hostOutput2Data,
                              cudaBuffers[3],
                              bindingBufVol[3] * 4,
                              cudaMemcpyDeviceToHost, stream));

        // Block until all queued work (copies + inference) has finished.
        // NOTE(review): return codes of these two calls are not CHECKed.
        cudaStreamSynchronize(stream);
        cudaStreamDestroy(stream);

        // Release device memory.
        for (int i = 0; i < 4; ++i) {
            CHECK(cudaFree(cudaBuffers[i]));
        }

        delete[] hostInput1Data;
        delete[] hostInput2Data;
        delete[] hostOutput1Data;
        delete [] hostOutput2Data;

        // NOTE(review): `context` is never destroyed — the execution context
        // leaks; call context->destroy() before engine->destroy().
        engine->destroy();
    }

    parser->destroy();
    buildcfg->destroy();
    networkdef->destroy();
    builder->destroy();
    return 0;
}
