#include <iostream>
#include <fstream>
#include <vector>
#include <cassert>
#include <cstring>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <NvInfer.h>

#include <NvInferRuntimeCommon.h>
#include <sys/stat.h>
#include <glog/logging.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <NvInferPlugin.h>
#include <ctime>
using namespace nvinfer1;
using namespace std;
using namespace cv;


// Global ILogger implementation. An ILogger is a required argument for the
// various TensorRT API entry points (e.g. createInferRuntime).
// Logger required by the TensorRT API: prints timestamped messages at or
// above the configured severity to the supplied output stream.
class TRT_Logger : public nvinfer1::ILogger
{
    nvinfer1::ILogger::Severity _verbosity; // minimum severity that gets printed
    std::ostream* _ostream;                 // destination stream (not owned)

public:
    TRT_Logger(Severity verbosity = Severity::kWARNING, std::ostream& ostream = std::cout)
        : _verbosity(verbosity)
        , _ostream(&ostream)
    {
    }

    // ILogger callback: writes "[<UTC timestamp> <SEVERITY>] <msg>".
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity > _verbosity)
            return; // below the configured threshold — drop silently

        time_t rawtime = std::time(0);
        char buf[256];
        strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::gmtime(&rawtime));

        const char* sevstr = "UNKNOWN";
        switch (severity)
        {
        case Severity::kINTERNAL_ERROR: sevstr = "    BUG"; break;
        case Severity::kERROR:          sevstr = "  ERROR"; break;
        case Severity::kWARNING:        sevstr = "WARNING"; break;
        case Severity::kINFO:           sevstr = "   INFO"; break;
        default: break;
        }

        (*_ostream) << "[" << buf << " " << sevstr << "] " << msg << std::endl;
    }
};


int main()
{
    // 指定模型引擎文件的路径
    TRT_Logger gLogger;
    const std::string enginePath = "/home/host/mzz/cpp_trt/pidnet_11j_best.engine"; // 替换为您的模型引擎文件路径

    // 初始化TensorRT。createInferRuntime 是一个用于创建 TensorRT 推理运行时对象的函数。
    // 函数接受一个参数，通常是用于日志记录的对象，以便在运行时记录 TensorRT 相关的信息。
    IRuntime* runtime = createInferRuntime(gLogger);  // 
    assert(runtime != nullptr);

    std::ifstream engineFile(enginePath, std::ios::binary);
    assert(engineFile.is_open());

    // 从引擎文件中反序列化引擎
    engineFile.seekg(0, engineFile.end);
    int engineSize = engineFile.tellg();
    engineFile.seekg(0, engineFile.beg);
    std::vector<char> engineData(engineSize);
    engineFile.read(engineData.data(), engineSize);
    engineFile.close();

    // 创建CudaEngine
    
    // 在deserializeCudaEngine()前加上
    bool didInitPlugins = initLibNvInferPlugins(nullptr, "");

    ICudaEngine* engine = runtime->deserializeCudaEngine(engineData.data(), engineSize, nullptr);
    assert(engine != nullptr);

    // 创建推理上下文
    IExecutionContext* context = engine->createExecutionContext();
    assert(context != nullptr);
    
    cudaStream_t stream = nullptr;
    // 创建CUDA流，以确定这个batch的推理是独立的
    cudaStreamCreate(&stream);
    //int inputBindingIndex = engine->getBindingIndex("input");
    
    //nvinfer1::Dims inputDims = {1, 3, 1024, 1536};
    //context->setBindingDimensions(inputBindingIndex, inputDims);

    // 分配GPU内存
    // 分配GPU内存
    const int batchSize = 1;
    const int inputChannels = 3; // 三通道图片
    const int inputHeight = 1024;
    const int inputWidth = 1536;
    const int inputSize = inputChannels * inputHeight * inputWidth * sizeof(float);

 
    // 输出1的大小
    const int outputSize1 = 1*2*128*192* sizeof(float)/* 第一个输出的大小 */;
    // 输出2的大小
    // const int outputSize2 = 1*8*96*320* sizeof(float)/* 第二个输出的大小 */;

    void* buffers[2]; // 3个缓冲区（1个输入和2个输出）
    cudaMalloc(&buffers[0], inputSize);
    cudaMalloc(&buffers[1], outputSize1);

    // 准备输入数据（假设您有一张RGB图片）
    cv::Mat inputImage = cv::imread("/home/host/rs00159.png"); // 替换为您的图片路径
    cv::Mat resizedImage;
    cv::resize(inputImage, resizedImage, cv::Size(inputWidth, inputHeight));
    resizedImage.convertTo(resizedImage, CV_32FC3); // 

    // 将图像从HWC转换为CHW
    // vector<float> a(inputSize);
    float* input_last = new float[inputSize];
    // cv::Mat chwImage(inputChannels, inputHeight, inputWidth, CV_32FC3);
    for (int c = 0; c < inputChannels; ++c)
    {
        for (int h = 0; h < inputHeight; ++h)
        {
            for (int w = 0; w < inputWidth; ++w)
            {   
                // cout<<"helloworld:"<<(c * inputHeight * inputWidth + h * inputWidth + w)<<endl;
                input_last[c * inputHeight * inputWidth + h * inputWidth + w] = static_cast<float>(resizedImage.at<cv::Vec3f>(h, w)[c]) / 255.0f;
            }
        }
    }
    
    // 将输入数据复制到GPU
    //  cout<<"helloworld:2";
    float* inputData = new float[inputChannels * inputHeight * inputWidth];
    memcpy(inputData, input_last, inputSize);
   // for (int i=0;i<(inputChannels * inputHeight * inputWidth);i++){
   //     cout<<"test:"<<inputData[i]<<endl;
   // }
    cudaMemcpy(buffers[0], inputData, inputSize, cudaMemcpyHostToDevice);
//    cout<<"test1"<<endl;
    // 执行推理
    
    clock_t start = clock();
  //  context->execute(batchSize, buffers);
    context->enqueueV2((void**)buffers, stream, nullptr);
  //  cout<<"test2"<<endl;
    // 获取输出数据
    float* outputData1 = new float[outputSize1];
    // float* outputData2 = new float[outputSize2];
    // ...
    cudaMemcpy(outputData1, buffers[1], outputSize1, cudaMemcpyDeviceToHost);
    clock_t end = clock();
    double elapsed = static_cast<double>(end - start) / CLOCKS_PER_SEC;
    std::cout << "Elapsed time: " << elapsed << " seconds" << std::endl;

    std::ofstream outputFile1("output1.txt");
    if (outputFile1.is_open())
    {
        for (int i = 0; i < outputSize1/(sizeof(float)); i++)
        {
            outputFile1 << outputData1[i] << " ";
        }
        outputFile1.close();
    }
    // 释放资源
    delete[] inputData;
    delete[] outputData1;
    //delete[] outputData2;
    delete[] input_last;
    // ...
    context->destroy();
    engine->destroy();
    runtime->destroy();

    return 0;
}


