/*
 * General image inference engine base class (TensorRT).
 */
#pragma once

#include <string>
#include <memory>
#include <vector>
#include <opencv2/core/core.hpp>
#include "NvInfer.h"

#include "vortex/core/blob.h"
#include "vortex/core/nvlogger.h"


namespace vortex
{
    // Base class for TensorRT-backed image inference.
    //
    // Owns the deserialized engine, its execution context, the runtime, and a
    // CUDA stream, along with the host/device blobs bound to the engine's
    // inputs and outputs. Derived classes implement InternalInfer() to run
    // the actual batched inference.
    class ImageInferBase
    {
    protected:
        NvLogger m_Logger;                              // logger handed to TensorRT
        cudaStream_t m_Stream = nullptr;                // stream for async inference work
        nvinfer1::ICudaEngine* m_Engine = nullptr;      // owned; released in destructor
        nvinfer1::IExecutionContext* m_Context = nullptr;
        nvinfer1::IRuntime* m_Runtime = nullptr;

        uint32_t m_BatchSize = 0;                       // 0 until IO info / engine is set
        std::vector<std::string> m_InputNames;
        std::vector<std::string> m_OutputNames;
        std::vector<std::shared_ptr<BlobF>> m_InputBlobs;
        std::vector<std::shared_ptr<BlobF>> m_OutputBlobs;

    public:
        ImageInferBase() = default;
        virtual ~ImageInferBase();

        // Non-copyable: this class owns raw TensorRT objects and a CUDA
        // stream; copying would double-destroy those resources.
        ImageInferBase(const ImageInferBase&) = delete;
        ImageInferBase& operator=(const ImageInferBase&) = delete;

        // Records the engine's input/output blob descriptions (names, shapes)
        // and sets up m_Input*/m_Output* accordingly; returns false on failure.
        // NOTE(review): semantics inferred from the signature — confirm
        // against the implementation in the .cpp.
        bool SetIOInfo(
            const std::vector<BlobInfo>& input_info,
            const std::vector<BlobInfo>& output_info);

        // Deserializes a TensorRT engine from the file at engine_path;
        // returns false on failure. (presumed — implementation not visible)
        bool LoadEngine(const std::string& engine_path);

        // inference is done for total batch of inputs
        virtual void InternalInfer();
    };
}
