#pragma once
#include <chrono>
#include <cmath>
#include <exception>
#include <fstream>
#include <iostream>
#include <limits>
#include <numeric>
#include <queue>
#include <stdexcept>
#include <string>
#include <vector>

#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>

#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <NvInfer.h>
#include <NvOnnxParser.h>

#include "trtmodel_error_code.h"
#include "trtmodel_struct.h"
#include "model_manager.h"
#include "common/base_struct.h"


namespace ai {

class ModelConfig;
class TRTPredictor;
class TRTClsPredictor;
class TRTOcrRecPredictor;
class TRTOCRDetector;
class TRTDetPredictor;
class TRTSegPredictor;
class TRTOCRPostProcessor;



class TRTModelManager: public ai::ModelManager
{
public:
  TRTModelManager(LogInfo *log_info);
  ~TRTModelManager();

  int init_model_imp(std::string model_path, std::string cfg_path, int gpu_idx, ai::InitModelData &imd, BasePredictor** infer, int debug_mode);

  int free_model(int md_idx);

  int loadNetwork(std::string trtModelPath, ai::TRTPackPredictor* trtp);
  int loadNetwork(std::vector<char> buffer, ai::TRTPackPredictor* trtp);
  
private:
  int load_model(std::string onnx_model_path, ai::TRTPackPredictor* oxp, std::string model_id);

  int buildNetwork(std::string onnxModelPath, std::string trtModelPath, ai::TRTPackPredictor* trtp);
  void clearGpuBuffers(TRTPackPredictor* mdl_trt);
  void getDeviceNames(std::vector<std::string> &deviceNames);

private:
  // friend class ClsPredictor;
  friend class TRTOcrRecPredictor;
  // friend class OCRDetector;
  friend class TRTDetPredictor;
  // friend class SegPredictor;

};

inline void checkCudaErrorCode(cudaError_t code);

}