/**
* \brief 
* \author pengcheng (pengcheng@yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/
#ifndef TENSORRT_INFERENCE_H___
#define TENSORRT_INFERENCE_H___

#include <memory>
#include <tensor_rt.pb.h>
#include <stdint.h>
#include <NvInferPlugin.h>
#include <cuda_runtime_api.h>
#include <numeric>
// #include <cudnn.h>

#include "onnx_tensorrt/NvOnnxParser.h"
#include "onnx_tensorrt/NvOnnxParserRuntime.h"
#include "detection_vision/tensorrt/impl/utils.hpp"
#include "detection_vision/tensorrt/calibration.h"


namespace tensorrt_inference
{
/**
 * \brief Thin RAII-style wrapper around a TensorRT engine/context pair that
 *        builds (or deserializes) an engine from an ONNX model and runs
 *        inference on pre-allocated CUDA buffers.
 *
 * NOTE(review): the class name "TRTInfernce" (and "nb_bingings",
 * "Inferrence", "UNKONWN", "cuda_cuffers_") are long-standing typos kept
 * intact because external callers and the .cpp implementation depend on
 * these exact identifiers.
 */
class TRTInfernce
{

public:
    using Ptr = std::shared_ptr<TRTInfernce>;
    using ConstPtr = std::shared_ptr<const TRTInfernce>;

    /// \brief Construct from a protobuf-backed TensorRT configuration.
    /// \param config engine/model settings (defined in tensor_rt.pb.h).
    explicit TRTInfernce(const TensorRTConfig& config);
    virtual ~TRTInfernce();

    /// \brief Retrieve the tensor bound at binding slot \p index.
    Tensor GetTensor(const int64_t index);

    /// Numeric precision the engine runs at.
    enum TensorType : uint8_t
    {
        UNKONWN = 1,  ///< precision not set (typo kept for compatibility)
        FLOAT,        ///< 32-bit float
        HALF,         ///< 16-bit float
        INT8          ///< quantized 8-bit (requires calibration)
    };
    /// Integer type underlying TensorType (uint8_t).
    using TensorTypeUnderlying = std::underlying_type<TensorType>::type;

    /// \return the current precision as its underlying integer value.
    inline TensorTypeUnderlying tensor_type() const
    {
        return tensor_type_;
    }

    /// \brief Set the engine precision.
    inline void tensor_type(TensorType tensor_type)
    {
        tensor_type_ = tensor_type;
    }

    /// \brief Set the engine precision from a raw byte.
    /// \note No range validation — values outside the enum are stored as-is.
    inline void tensor_type(uint8_t tensor_type)
    {
        tensor_type_ = static_cast<TensorType>(tensor_type);
    }

    /// \return number of engine bindings (inputs + outputs).
    /// (Top-level const on the by-value return is meaningless but kept to
    /// avoid touching the declared signature.)
    inline const int64_t nb_bingings() const
    {
        return nb_bindings_;
    }

    /// \brief Fetch the raw device pointer for binding slot \p index.
    /// \param index binding slot; valid slots are [0, nb_bindings_).
    /// \param buff  out-param receiving the device pointer on success.
    /// \return true when \p index is in range, false otherwise.
    inline bool GetBuffDataGPU(const unsigned int index, void** buff) const
    {
        // Bug fix: the original check (index > nb_bindings_) accepted
        // index == nb_bindings_, reading one past the end of cuda_cuffers_.
        // Valid slots are 0..nb_bindings_-1, so reject index >= count.
        // The cast keeps the comparison in one signed type.
        if(static_cast<int64_t>(index) >= nb_bindings_) return false;
        *buff = cuda_cuffers_[index];
        return true;
    }

    /// \return per-binding buffer sizes, in element counts or bytes as
    ///         filled by InitEngine (returned by value — copies the vector).
    inline const std::vector<int64_t> bind_buffer_sizes() const
    {
        return bind_buffer_sizes_;
    }

    /// \brief Run inference on \p inputData (device or host pointer —
    ///        determined by the .cpp implementation; confirm before use).
    void Inferrence(const void *inputData);
    /// \brief Create runtime/engine/context and size the binding buffers.
    void InitEngine();
    /// \brief Parse the configured ONNX model into a TensorRT engine,
    ///        using \p calibrator when INT8 precision is selected.
    void OnnxToGIEModel(Int8CacheCalibrator& calibrator);
private:
    TensorType tensor_type_;          // active precision mode
    TensorRTConfig config_;           // construction-time configuration
    int64_t nb_bindings_;             // number of engine binding slots
    // int64_t output_size_;

    Logger gLogger_;                  // TensorRT logging sink
    nvinfer1::IExecutionContext* context_;  // raw TensorRT handles; owned —
    nvinfer1::ICudaEngine* engine_;         // released in the destructor
    nvinfer1::IRuntime* runtime_;           // (defined in the .cpp)

    nvonnxparser::IPluginFactory *plugin_factory_;
    cudaStream_t stream_;             // CUDA stream used for enqueue
    Profiler profiler_;

    std::vector<void*> cuda_cuffers_;        // one device buffer per binding
    std::vector<int64_t> bind_buffer_sizes_; // size of each binding buffer
    void * cuda_input_buffer_;               // staging buffer for input data
    
};
}
#endif
