#pragma once
#include <string>
#include <vector>
#include <iostream>
#include <stdint.h>
#include <opencv2/opencv.hpp>
#include <NvInfer.h>
#include <NvOnnxParser.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include "model_struct.h"
#include "base_predictor.h"
namespace ai {

class ModelConfig;
class TRTModelManager;

// Maximum number of input channels supported by the predictor pipeline.
// Typed, namespace-scoped constant instead of a #define (a macro has no type
// and ignores namespace ai).
// NOTE(review): assumes this value is only used as an ordinary constant — if
// any translation unit tests it in a preprocessor conditional (#if), that use
// would need the macro back; confirm before merging.
constexpr int MAX_SUPPORT_CHANNEL = 16;

// Precision used for GPU inference
// Precision used for GPU inference
enum class Precision {
    // Full precision (32-bit) floating point value
    FP32,
    // Half precision (16-bit) floating point value
    FP16,
    // Int8 quantization.
    // Has reduced dynamic range, may result in slight loss in accuracy.
    // If INT8 is selected, must provide path to calibration dataset directory.
    INT8,
};

// Options for the network
// Options for the network.
// Plain aggregate: member order and defaults are part of the interface
// (callers may rely on aggregate initialization), so do not reorder fields.
struct Options {
    // Precision to use for GPU inference.
    Precision precision = Precision::FP16;
    // If INT8 precision is selected, must provide path to calibration dataset
    // directory. Ignored for FP32/FP16.
    std::string calibrationDataDirectoryPath;
    // The batch size to be used when computing calibration data for INT8
    // inference. Should be set to as large a batch number as your GPU will
    // support.
    int32_t calibrationBatchSize = 128;
    // The batch size which should be optimized for.
    int32_t optBatchSize = 1;
    // Maximum allowable batch size.
    int32_t maxBatchSize = 16;
    // GPU device index (as understood by the CUDA runtime).
    int deviceIndex = 0;
};

// Class to extend TensorRT logger
class TRTLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char *msg) noexcept {
    // Only log Warnings or more important.
    if (severity <= Severity::kWARNING) {
      std::cout << msg << std::endl;
    }
  }
};


// TensorRT-backed pack predictor: bundles the engine/runtime/context handles
// and the per-tensor metadata needed to run inference.
struct TRTPackPredictor : public BasePackPredictor {
  TRTPackPredictor() : BasePackPredictor() {}

  // Holds pointers to the input and output GPU buffers, one entry per
  // engine I/O tensor.
  std::vector<void *> m_buffers;
  // Length of each output tensor; presumably element counts matching
  // m_outputDims — TODO confirm against the code that fills it.
  std::vector<uint32_t> m_outputLengths{};
  // Dimensions of each input tensor (fixed rank 3 per input).
  std::vector<nvinfer1::Dims3> m_inputDims;
  // Dimensions of each output tensor.
  std::vector<nvinfer1::Dims> m_outputDims;
  // Names of the engine's I/O tensors, used to bind m_buffers.
  std::vector<std::string> m_IOTensorNames;
  // Batch size the input buffers were sized for.
  // FIX: was left uninitialized — reading an indeterminate int is undefined
  // behavior. -1 is a sentinel meaning "not yet configured".
  int32_t m_inputBatchSize = -1;

  // Must keep IRuntime around for inference, see:
  // https://forums.developer.nvidia.com/t/is-it-safe-to-deallocate-nvinfer1-iruntime-after-creating-an-nvinfer1-icudaengine-but-before-running-inference-with-said-icudaengine/255381/2?u=cyruspk4w6
  nvinfer1::IRuntime* m_runtime = nullptr;
  // std::unique_ptr<Int8EntropyCalibrator2> m_calibrator = nullptr;
  nvinfer1::ICudaEngine* m_engine = nullptr;
  nvinfer1::IExecutionContext* m_context = nullptr;
  // Build/runtime options this predictor was configured with.
  Options m_options;
  // Logger instance handed to the TensorRT builder/runtime.
  TRTLogger logger_trt;

  // NOTE(review): m_runtime/m_engine/m_context and the device memory behind
  // m_buffers are raw handles with no destructor here — presumably released
  // by whoever owns this struct (BasePackPredictor/TRTModelManager are not
  // visible in this file). Confirm; otherwise these leak, and the struct is
  // also freely copyable, which would alias the handles (double-free risk if
  // two copies ever clean up).
};

}   // namespace ai 
