#ifndef TRT_PREDICTOR_H
#define TRT_PREDICTOR_H

#include <fstream>
#include <iostream>
#include <string>
#include <cstring>

#include <NvInfer.h>
#include <cuda_runtime_api.h>
using namespace nvinfer1;

#include "logging.h"
#include "trt_utils.h"


// Abort on any non-zero CUDA runtime status.
// Evaluates `status` exactly once; wrapped in do/while(0) so it is safe
// inside unbraced if/else. On failure, reports the status code together
// with the source location of the failing call for easier debugging.
#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            std::cerr << "Cuda failure: " << ret \
                      << " at " << __FILE__ << ":" << __LINE__ << std::endl;\
            abort();\
        }\
    } while (0)

// Wrapper around a deserialized TensorRT engine for single-input /
// single-output inference. Method bodies live in the corresponding .cpp;
// only the interface is declared here.
class TRTPredictor
{
private:
    int device_id;                  // CUDA device ordinal selected via set_device_id()

    char *trtModelStream;           // raw owning pointer — presumably the serialized engine bytes read by load_model(); freed in destroy()/dtor (TODO confirm in .cpp)
    size_t model_size;              // byte length of trtModelStream

    // Fixed-size buffers for TensorRT binding names; names longer than
    // 127 chars would be truncated (NOTE(review): verify set_*_name checks this).
    char input_blob_name[128];
    char output_blob_name[128];

    Logger gLogger;                 // TensorRT logger (from logging.h)

    // TensorRT object hierarchy: runtime deserializes the engine,
    // the engine creates the execution context used per inference.
    IRuntime* runtime;
    ICudaEngine* engine;
    IExecutionContext* context;

    size_t input_typesize;          // bytes per input element (e.g. 4 for float32)
    size_t output_typesize;         // bytes per output element
    
    Dims out_dims;                  // output tensor dimensions, queried from the engine
    int32_t output_size;            // total output element count (presumably product of out_dims — confirm in .cpp)

    bool config;                    // NOTE(review): purpose unclear from declaration — likely "fully configured / init() done" flag; confirm against .cpp

public:
    TRTPredictor();
    ~TRTPredictor();

    // Setters to be called before init(); each presumably returns false on
    // invalid input (TODO confirm exact failure semantics in .cpp).
    bool load_model(std::string model_path);
    bool set_input_name(std::string input_name);
    bool set_output_name(std::string output_name);
    bool set_device_id(int id);
    bool set_input_typesize(size_t typesize);
    bool set_output_typesize(size_t typesize);

    // Builds runtime/engine/context from the loaded model; destroy()
    // releases them. Call init() after all setters succeed.
    bool init();
    void destroy();

    // Accessors for the output tensor's element count and dimensions.
    size_t get_output_size();
    Dims get_output_dims();

    // Runs one inference: copies `input_size` bytes in, executes the
    // context, writes results to `output` (caller-allocated).
    void inference(void* input, size_t input_size, void* output);
};

#endif