#ifndef TRT_MODEL_DEPLOY_H
#define TRT_MODEL_DEPLOY_H

#include <NvInfer.h>
#include <NvOnnxParser.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

#include <fstream>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

#include "common.h"
#include "trthelper.h"

using namespace nvonnxparser;
using namespace nvinfer1;

class trtModelDeploy
{
public:
    public:
    explicit trtModelDeploy(const std::string& modelPath)
    {
        loadEngine(modelPath);
        // cudaStreamCreate(&stream);
    }
    virtual void infer(const cv::Mat& image) = 0; // 必须实现的行为

    virtual ~trtModelDeploy() = default; // 虚析构函数
    virtual cv::Mat preprocess(const cv::Mat& image) = 0;
    virtual void postprocess(const cv::Mat& image) = 0;

protected:
    std::unique_ptr<nvinfer1::ICudaEngine> m_engine;
    std::unique_ptr<nvinfer1::IRuntime> m_runtime;
    std::unique_ptr<nvinfer1::IExecutionContext> m_context;
    Logger m_logger;

    cudaStream_t stream;

     // 共用的辅助函数
    virtual void loadEngine(const std::string& enginePath)
    {
        trtModel trt_model = TRTHelper::getInstance().load_model(enginePath.c_str());
        
        m_runtime.reset(createInferRuntime(m_logger));
        m_engine.reset(m_runtime->deserializeCudaEngine(trt_model.model, trt_model.size));
        m_context.reset(m_engine->createExecutionContext());
        cudaStreamCreate(&stream);
        // int32_t num =  m_engine->getNbBindings();
        // std::cout << "num = ？？？？ " << num << std::endl;
    }
    
};

#endif
