#include <algorithm>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include <gtest/gtest.h>

#include "core/session/onnxruntime_cxx_api.h"
#include "allocator.h"

using namespace onnxruntime;

// Runs one inference on `session_object`, feeding `values_x` (with shape
// `dims_x`) as input "data_0", then prints the length and a short preview of
// the output "softmaxout_1".  `env` is the allocator used to create the input
// tensor.  Throws (via ONNXRUNTIME_THROW_ON_ERROR) on any API failure; gtest
// ASSERTs abort the function if the run produces no output.
void RunSession(ONNXRuntimeAllocator *env, ONNXSession *session_object,
                const std::vector<size_t> &dims_x,
                const std::vector<float> &values_x)
{
  // The input tensor is owned by value_x so it is released even if an ASSERT
  // or an exception fires below.
  std::unique_ptr<ONNXValue, decltype(&ReleaseONNXValue)> value_x(nullptr, ReleaseONNXValue);
  std::vector<ONNXValuePtr> inputs(1);
  inputs[0] = ONNXRuntimeCreateTensorAsONNXValue(env, dims_x, ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);
  value_x.reset(inputs[0]);

  // Copy the caller's data into the tensor's backing buffer.
  void *raw_data = nullptr;
  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorMutableData(inputs[0], &raw_data));
  memcpy(raw_data, values_x.data(), values_x.size() * sizeof(values_x[0]));

  std::vector<const char *> input_names{"data_0"};
  const char *output_names[] = {"softmaxout_1"};
  ONNXValuePtr output_tensor_raw = nullptr;
  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeRunInference(session_object, nullptr, input_names.data(), inputs.data(), inputs.size(), output_names, 1, &output_tensor_raw));
  ASSERT_NE(output_tensor_raw, nullptr);
  // Fix: take ownership immediately — the original released the output only on
  // the straight-line path and leaked it if anything below threw or asserted.
  std::unique_ptr<ONNXValue, decltype(&ReleaseONNXValue)> output_tensor(output_tensor_raw, ReleaseONNXValue);

  // NOTE(review): default_delete on an opaque C-API handle looks suspicious;
  // the API likely has a dedicated release call for shape infos — confirm.
  std::unique_ptr<ONNXRuntimeTensorTypeAndShapeInfo> shape_info;
  {
    ONNXRuntimeTensorTypeAndShapeInfo *shape_info_ptr;
    ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorShapeAndType(output_tensor.get(), &shape_info_ptr));
    shape_info.reset(shape_info_ptr);
  }

  // Total element count = product of all output dimensions.
  size_t rtensor_dims = ONNXRuntimeGetNumOfDimensions(shape_info.get());
  std::vector<int64_t> shape_array(rtensor_dims);
  ONNXRuntimeGetDimensions(shape_info.get(), shape_array.data(), shape_array.size());
  size_t total_len = 1;
  for (size_t i = 0; i != rtensor_dims; ++i)
  {
    total_len *= shape_array[i];
  }

  float *output_tensor_ptr;
  ONNXRUNTIME_THROW_ON_ERROR(ONNXRuntimeGetTensorMutableData(output_tensor.get(), (void **)&output_tensor_ptr));
  // Fix: %zu for size_t (was %d — undefined behavior on 64-bit platforms).
  printf("output tensor length: %zu\n", total_len);
  // Fix: clamp the preview to total_len (was an unconditional 32-element read,
  // out of bounds whenever the output holds fewer than 32 floats).
  const size_t preview = std::min<size_t>(32, total_len);
  for (size_t i = 0; i < preview; ++i)
  {
    printf("%f, ", output_tensor_ptr[i]);
  }
}

int main()
{
  const char* model_path = "../../csharp/testdata/squeezenet.onnx";
  ONNXRuntimeEnv *env = nullptr;
  ONNXRuntimeInitialize(ONNXRUNTIME_LOGGING_LEVEL_kFATAL, "Default", &env);
  SessionOptionsWrapper sf(env);
  std::unique_ptr<ONNXSession, decltype(&ReleaseONNXSession)> inference_session(sf.ONNXRuntimeCreateInferenceSession(model_path), ReleaseONNXSession);
  std::unique_ptr<ONNXRuntimeAllocator> default_allocator(MockedONNXRuntimeAllocator::Create());

  // prepare inputs
  std::vector<size_t> dims_x = {1,3,224,224};
  std::vector<float> values_x(1*3*224*224, 0);

  // Now run
  RunSession(default_allocator.get(), inference_session.get(), dims_x, values_x);

  return 0;
}