#include <cstdint>
#include <cstdio>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include <onnxruntime_cxx_api.h>

#include "utils/image_processing/image_process.h"

class OrtEngine {
public:
  OrtEngine(const std::string& model_path)
    : env(ORT_LOGGING_LEVEL_WARNING, "ONNXRuntime"),
      session_options(),
      session(env, model_path.c_str(), session_options) {
    session_options.SetIntraOpNumThreads(1);
    session_options.SetGraphOptimizationLevel(
      GraphOptimizationLevel::ORT_ENABLE_BASIC);

    if (!session) {
      throw std::runtime_error("Failed to create ONNXRuntime session");
    }
  }

  ~OrtEngine() {}

  template <typename T>
  std::vector<T> preProcess(const cv::Mat& image) {
    return tcv::PreprocessImage<T>(image, input_shape);
  }

  std::vector<float> infer(const std::vector<float>& input_tensor_values) {
    Ort::AllocatorWithDefaultOptions allocator;

    std::string input_name = getInputName();
    std::string output_name = getOutputName();

    const char* input_name_ptr = input_name.c_str();
    const char* output_name_ptr = output_name.c_str();

    Ort::MemoryInfo memory_info =
      Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);

    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
      memory_info, const_cast<float*>(input_tensor_values.data()),
      input_tensor_values.size(), input_shape.data(), input_shape.size());

    auto output_tensors = session.Run(Ort::RunOptions{nullptr}, &input_name_ptr,
                                      &input_tensor, 1, &output_name_ptr, 1);

    float* floatarr = output_tensors[0].GetTensorMutableData<float>();

    size_t output_tensor_size =
      output_tensors[0].GetTensorTypeAndShapeInfo().GetElementCount();

    return std::vector<float>(floatarr, floatarr + output_tensor_size);
  }

  std::vector<tcv::Detection> postProcess(const std::vector<float>& results,
                                          float confidence_threshold,
                                          int img_width, int img_height,
                                          int orig_width, int orig_height) {
    return tcv::FilterDetections(results, confidence_threshold, img_width,
                                 img_height, orig_width, orig_height);
  }

  cv::Mat drawLabels(const cv::Mat& image,
                     const std::vector<tcv::Detection>& detections) {
    return tcv::DrawLabels(image, detections);
  }

  // batch, channel, height, width
  std::vector<int64_t> input_shape{1, 3, 640, 640};

private:
  Ort::Env env;
  Ort::SessionOptions session_options;
  Ort::Session session;

  std::string getInputName() {
    Ort::AllocatorWithDefaultOptions allocator;
    Ort::AllocatedStringPtr name_allocator =
      session.GetInputNameAllocated(0, allocator);
    return std::string(name_allocator.get());
  }

  std::string getOutputName() {
    Ort::AllocatorWithDefaultOptions allocator;
    Ort::AllocatedStringPtr name_allocator =
      session.GetOutputNameAllocated(0, allocator);
    return std::string(name_allocator.get());
  }
};

// CLI entry point: <prog> <image_path> <model_path>.
// Loads an image, runs detection, writes "output_inference.jpg".
// Returns 0 on success, 1 on bad arguments / unreadable image / runtime error.
int main(int argc, char** argv) {
  if (argc != 3) {
    // Fixed: the original usage string was missing the space after argv[0].
    std::cerr << "Usage: " << argv[0] << " <image_path> <model_path>"
              << std::endl;
    return 1;
  }

  const std::string image_path = argv[1];
  const std::string model_path = argv[2];

  try {
    OrtEngine engine(model_path);

    // Load the image and fail fast on a bad path: cv::imread returns an
    // empty Mat instead of throwing, and the original code never checked.
    cv::Mat src = cv::imread(image_path);
    if (src.empty()) {
      std::cerr << "Failed to read image: " << image_path << std::endl;
      return 1;
    }

    const int src_width = src.cols;
    const int src_height = src.rows;
    printf("src_width: %d, src_height: %d\n", src_width, src_height);

    // Preprocess the image into the network's NCHW input layout.
    auto input_tensor_values = engine.preProcess<float>(src);

    // Run inference.
    std::vector<float> output = engine.infer(input_tensor_values);

    // Postprocess the results. input_shape is {N, C, H, W}: index 3 is
    // width and index 2 is height. The original passed them swapped —
    // harmless only because the model input happens to be square (640x640).
    const float confidence_threshold = 0.3f;
    std::vector<tcv::Detection> detections =
      engine.postProcess(output, confidence_threshold, engine.input_shape[3],
                         engine.input_shape[2], src_width, src_height);

    printf("[DEBUG] detections size: %zu\n", detections.size());

    // Draw labels on the image and save the result.
    cv::Mat result_image = engine.drawLabels(src, detections);
    cv::imwrite("output_inference.jpg", result_image);
  } catch (const std::exception& e) {
    // Ort::Exception derives from std::exception; report instead of
    // letting an uncaught throw terminate the process.
    std::cerr << "Error: " << e.what() << std::endl;
    return 1;
  }

  return 0;
}
