#include <array>
#include <cstdint>
#include <iostream>

#include <onnxruntime_cxx_api.h>

// Path to the serialized LeNet model. ONNX Runtime's Session constructor
// takes a wide-character path on Windows and a narrow path elsewhere,
// hence the conditional type. NOTE: the predefined macro is _WIN32
// (the previous __WIN32 is never defined by MSVC/MinGW, so the wrong
// branch was selected on Windows).
#ifdef _WIN32
constexpr const wchar_t *g_onnx_model_path = L"../leNet.onnx";
#else
constexpr const char *g_onnx_model_path = "../leNet.onnx";
#endif

// Model dimensions: the graph expects a 1x1x32x32 input tensor and
// emits one score per category.
constexpr int g_width = 32;
constexpr int g_height = 32;
constexpr int g_categories = 5;

// Tensor shapes in NCHW order (batch, channels, height, width).
std::array<int64_t, 4> g_input_shape{1, 1, g_height, g_width};
std::array<int64_t, 2> g_output_shape{1, g_categories};

// Flat buffers backing the input/output tensors; the Ort::Value
// objects created in main() wrap this storage without owning it.
std::array<float, g_width * g_height> g_input_image;
std::array<float, g_categories> g_result;

int main(int argc, char *argv[]) {

  Ort::Env g_env;
  Ort::Session g_session{g_env, g_onnx_model_path, Ort::SessionOptions{nullptr}};

  auto g_memory_info =
      Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);

  Ort::Value g_input_tensor = Ort::Value::CreateTensor<float>(
      g_memory_info, g_input_image.data(), g_input_image.size(),
      g_input_shape.data(), g_input_shape.size());

  Ort::Value g_output_tensor = Ort::Value::CreateTensor<float>(
      g_memory_info, g_result.data(), g_result.size(), g_output_shape.data(),
      g_output_shape.size());

  const char *g_input_names[] = {"input"};
  const char *g_output_names[] = {"output"};

  g_session.Run(Ort::RunOptions{nullptr}, g_input_names, & g_input_tensor, 1, g_output_names, &g_output_tensor, 1);

  for (int i = 0; i < (int)g_result.size(); ++i) {
    std::cout << g_result[i] << " ";
  }
  std::cout << std::endl;
  return 0;
}