#ifndef TRT_INFER_H_
#define TRT_INFER_H_

#include <cstdint>
#include <iosfwd>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "cuda_common.h"
#include "tensorrt_common.h"

namespace autodrive {
namespace perception {

/// @brief Wrapper around a TensorRT runtime / engine / execution-context
/// triple: builds (or deserializes a cached) engine plan from a model file,
/// enumerates the network IO tensors, and runs inference.
/// Non-copyable and non-movable. Presumably Build() is intended to run at
/// most once per instance (see build_flag_) — confirm against the .cpp.
class TrtInfer {
 public:
  TrtInfer();
  /// Virtual destructor: the class is intended to be derived from
  /// (members and helpers are protected).
  virtual ~TrtInfer();
  TrtInfer(TrtInfer const&) = delete;
  TrtInfer& operator=(TrtInfer const&) = delete;
  TrtInfer(TrtInfer&&) = delete;
  TrtInfer& operator=(TrtInfer&&) = delete;

 public:
  /// @brief Build-time configuration for the TensorRT engine.
  /// POD fields are brace-initialized so a default-constructed instance
  /// holds well-defined zeros instead of indeterminate values.
  struct TrtInferSettings {
    /// @brief model file path
    std::string model_file;
    /// @brief CUDA device ordinal to use
    std::int32_t use_cuda_device{0};
    /// @brief DLA core to use; -1 disables DLA
    std::int32_t use_dla_core{0};
    /// @brief max batch size
    std::int32_t max_batch_size{0};
    /// @brief max workspace size, in GB
    std::int32_t max_work_space_size{0};
    /// @brief precision mode, e.g. "fp16" or "int8"
    std::string scaler_type;
  };

  /// @brief Description of one network input/output binding.
  struct IOTensor {
    /// @brief tensor name
    std::string name;
    /// @brief binding index
    std::int32_t index{0};
    /// @brief io mode (input vs output)
    nvinfer1::TensorIOMode mode{};
    /// @brief shape
    nvinfer1::Dims shape{};
    /// @brief data type
    nvinfer1::DataType data_type{};
    /// @brief byte size: n*c*h*w*sizeof(data_type)
    /// NOTE(review): int32 may overflow for very large tensors — confirm.
    std::int32_t size{0};
    /// @brief element count: n*c*h*w
    std::int32_t count{0};
    /// @brief pinned host memory backing this binding
    CUDAPinnedMemoryPtr mem_ptr;
  };

  /// @brief io tensors, keyed by tensor name
  using IOTensorMap = std::unordered_map<std::string, IOTensor>;

 public:
  /// @brief get trt infer settings
  /// @return settings captured at Build() time
  [[nodiscard]] inline TrtInferSettings const& Settings() const noexcept {
    return settings_;
  }

  /// @brief get build status
  /// @return true once Build() has completed
  [[nodiscard]] inline bool IsBuild() const noexcept { return is_build_; }

  /// @brief get network io tensors
  /// @return network io tensor dict
  [[nodiscard]] IOTensorMap const& IOTensors() const noexcept {
    return io_tensor_map_;
  }

  /// @brief build the runtime/engine/context from the given settings
  /// @param settings settings
  /// @throws on build failure (declared noexcept(false))
  void Build(TrtInferSettings const& settings) noexcept(false);

  /// @brief do inference
  /// @throws on inference failure (declared noexcept(false))
  void DoInfer() noexcept(false);

  /// @brief release TensorRT resources held by this instance
  void Release() noexcept;

 protected:
  /// @brief construct a serialized engine plan from the model file
  /// @param settings tensorrt infer settings
  /// @return serialized plan host memory
  NvinferHostMemorySharePtr construct_plan(
      TrtInferSettings const& settings) noexcept(false);

  /// @brief serialize plan to a cache file
  /// @param nvinferPlanPtr serialized plan host memory
  /// @param file destination cache file path
  void serialize_plan(NvinferHostMemorySharePtr nvinferPlanPtr,
                      std::string const& file) noexcept(false);

  /// @brief read cache file and deserialize plan
  /// @param file cache file path
  /// @param cache output buffer receiving the raw plan bytes
  void deserialize_plan(std::string const& file, std::string& cache) const
      noexcept(false);

  /// @brief build cuda engine from a plan file
  /// @param settings tensorrt infer settings
  /// @param plan_file plan (cache) file path
  /// @return runtime and engine pointers
  std::pair<nvinfer1::IRuntime*, nvinfer1::ICudaEngine*> build_cuda_engine(
      TrtInferSettings const& settings,
      std::string const& plan_file) noexcept(false);

  /// @brief build cuda engine with retry
  /// @param settings tensorrt infer settings
  /// @return runtime and engine pointers
  std::pair<nvinfer1::IRuntime*, nvinfer1::ICudaEngine*>
  build_cuda_engine_retry(TrtInferSettings const& settings) noexcept(false);

  /// @brief get trt tensor io according to binding index
  /// @param engine_ptr engine pointer
  /// @param index binding index
  /// @return trt tensor io
  IOTensor get_io_tensor(nvinfer1::ICudaEngine const* engine_ptr,
                         std::int32_t index) noexcept(false);

  /// @brief get data type size in bytes
  /// @param t data type
  /// @return size in bytes
  /// NOTE(review): name has a typo ("date" vs "data"); kept unchanged for
  /// source compatibility with existing callers/overriders.
  std::int32_t get_date_type_size(nvinfer1::DataType t) noexcept;

  /// @brief enumerate io tensors of the engine
  /// @param engine_ptr engine pointer
  /// @param map output io tensor map
  void enumerate_io_tensors(nvinfer1::ICudaEngine const* engine_ptr,
                            IOTensorMap& map) noexcept(false);

  /// @brief print one io tensor description to a stream
  /// @param tensor tensor to print
  /// @param stream destination stream
  /// NOTE(review): std::ostream& would suffice; std::iostream& kept
  /// unchanged for source compatibility.
  void print_io_tensor(IOTensor const& tensor,
                       std::iostream& stream) const noexcept;

 private:
  /// @brief set once Build() completes; brace-init avoids indeterminate reads
  bool is_build_{false};
  /// @brief lock
  std::mutex lock_;
  /// @brief once-flag, presumably guarding the build — confirm in .cpp
  std::once_flag build_flag_;
  /// @brief settings
  TrtInferSettings settings_;
  /// @brief runtime pointer (raw; presumably released in Release() — verify)
  nvinfer1::IRuntime* nvinfer_runtime_ptr_{nullptr};
  /// @brief engine pointer
  nvinfer1::ICudaEngine* nvinfer_engine_ptr_{nullptr};
  /// @brief execution context pointer
  nvinfer1::IExecutionContext* nvinfer_context_ptr_{nullptr};
  /// @brief io tensor map
  IOTensorMap io_tensor_map_;
  /// @brief bindings
  std::vector<void*> bindings_;
};

}  // namespace perception
}  // namespace autodrive

#endif