#include <cuda_runtime_api.h>

#include <atomic>
#include <chrono>
#include <ctime>
#include <fstream>
#include <iostream>
#include <thread>

#include "NvInfer.h"
#include "NvOnnxParser.h"
#include "opencv2/opencv.hpp"

using namespace cv;
using namespace nvinfer1;
using namespace nvonnxparser;
using namespace std;

// Set by main() to tell the status thread to stop.
std::atomic<bool> stopFlag(false);

// Heartbeat for the long-running engine build: prints a message once per
// second until stopFlag is set, then reports the total elapsed time.
//
// Fix: the original used clock(), which measures *process CPU time* -- this
// thread sleeps almost the entire loop, so the reported duration was
// meaningless (and was printed in raw ticks, not seconds). steady_clock
// measures wall-clock time, which is what "how long did the build take"
// actually means here.
void PrintStatus()
{
    const auto begin = std::chrono::steady_clock::now();
    while (!stopFlag.load(std::memory_order_relaxed))
    {
        std::cout << "Code is still running..." << std::endl;
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }
    const auto end = std::chrono::steady_clock::now();
    const double seconds =
        std::chrono::duration_cast<std::chrono::duration<double>>(end - begin).count();
    std::cout << "Time=" << seconds << "s" << std::endl;
}

// Minimal TensorRT logger: forwards warnings and errors to stdout,
// drops info/verbose messages.
class Logger : public ILogger
{
    void log(Severity severity, nvinfer1::AsciiChar const* msg) noexcept override
    {
        // Severity enum orders ERROR < WARNING < INFO < VERBOSE,
        // so anything "greater" than kWARNING is chatter we skip.
        if (severity > Severity::kWARNING)
            return;
        std::cout << msg << std::endl;
    }
} gLogger;


// Converts an ONNX model ("final.onnx") into a serialized TensorRT engine
// ("final2.trt"). Returns 0 on success, 1 on any failure.
int main() {

    // Instantiate the builder and an explicit-batch network.
    IBuilder* builder = createInferBuilder(gLogger);
    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(
        1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));

    // Parse the ONNX file into the network.
    nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger);
    const char* onnx_filename = "final.onnx";
    const bool parsed =
        parser->parseFromFile(onnx_filename, static_cast<int>(Logger::Severity::kWARNING));
    for (int i = 0; i < parser->getNbErrors(); ++i)
    {
        std::cout << parser->getError(i)->desc() << std::endl;
    }
    // Fix: the parse result was previously ignored, so a broken/missing ONNX
    // file would fall through into the engine build and fail much later.
    if (!parsed)
    {
        std::cerr << "failed to parse ONNX model: " << onnx_filename << std::endl;
        delete parser;
        delete network;
        delete builder;
        return 1;
    }
    std::cout << "successfully load the onnx model" << std::endl;

    // Configure and build the engine (GPU, 1 MiB workspace, FP32).
    unsigned int maxBatchSize = 1;
    builder->setMaxBatchSize(maxBatchSize);
    IBuilderConfig* config = builder->createBuilderConfig();
    config->setMaxWorkspaceSize(1 << 20);
    //config->setFlag(BuilderFlag::kFP16);
    config->setDefaultDeviceType(DeviceType::kGPU);

    // Heartbeat thread so long builds don't look hung.
    std::thread statusThread(PrintStatus);
    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
    stopFlag.store(true, std::memory_order_relaxed);
    // Fix: join before any early exit -- the original only joined after
    // serialize(), so a failed build would std::terminate (or null-deref).
    statusThread.join();

    if (!engine)
    {
        std::cerr << "engine build failed" << std::endl;
        delete parser;
        delete network;
        delete config;
        delete builder;
        return 1;
    }

    // Serialize the engine to disk.
    int rc = 0;
    IHostMemory* serializedModel = engine->serialize();
    {
        std::ofstream serialize_output_stream("final2.trt",
                                              std::ios_base::out | std::ios_base::binary);
        serialize_output_stream.write(static_cast<const char*>(serializedModel->data()),
                                      serializedModel->size());
        // Fix: report write failures instead of silently producing a bad file.
        if (!serialize_output_stream)
        {
            std::cerr << "failed to write final2.trt" << std::endl;
            rc = 1;
        }
    }   // stream closed here by RAII

    // Fix: serializedModel and engine were previously leaked.
    delete serializedModel;
    delete engine;
    delete parser;
    delete network;
    delete config;
    delete builder;

    return rc;
}
