#include <iostream>
#include <string>
#include <vector>
#include <array>
#include <chrono>

#include <onnxruntime_cxx_api.h>
#include "OnnxEmbedder.h"

using namespace std;

/// Builds an ONNX Runtime session for the given model and caches the
/// model's input/output tensor names for later Run() calls.
///
/// @param model_path filesystem path to the .onnx model
/// @param use_cuda   when true, registers the CUDA execution provider;
///                   otherwise the default CPU provider is used
OnnxEmbedder::OnnxEmbedder(const std::string &model_path, bool use_cuda)
    : env_(ORT_LOGGING_LEVEL_WARNING, "embed_demo")
{
    // Single inter-op thread; intra-op threading is left at its default.
    session_options_.SetInterOpNumThreads(1);

    if (!use_cuda)
    {
        std::cout << "Using CPU Execution Provider " << std::endl;
    }
    else
    {
        std::cout << "Using GPU Execution Provider" << std::endl;
        OrtCUDAProviderOptions cuda_options;
        // NOTE(review): device_id 1 targets the *second* GPU — confirm this
        // is intended; it will fail on single-GPU hosts.
        cuda_options.device_id = 1;
        cuda_options.arena_extend_strategy = 0;
        cuda_options.gpu_mem_limit = SIZE_MAX; // no explicit cap on GPU arena
        cuda_options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
        cuda_options.do_copy_in_default_stream = 1;
        session_options_.AppendExecutionProvider_CUDA(cuda_options);
    }

    session_ = std::make_unique<Ort::Session>(env_, model_path.c_str(), session_options_);
    allocator_ = std::make_unique<Ort::AllocatorWithDefaultOptions>();

    // Copy the allocator-owned name buffers into owned std::strings so the
    // names outlive the temporary AllocatedStringPtr objects.
    const size_t n_inputs = session_->GetInputCount();
    input_names_str_.reserve(n_inputs);
    for (size_t idx = 0; idx < n_inputs; ++idx)
    {
        input_names_str_.emplace_back(session_->GetInputNameAllocated(idx, *allocator_).get());
    }

    const size_t n_outputs = session_->GetOutputCount();
    output_name_str_.reserve(n_outputs);
    for (size_t idx = 0; idx < n_outputs; ++idx)
    {
        output_name_str_.emplace_back(session_->GetOutputNameAllocated(idx, *allocator_).get());
    }
}

std::vector<Ort::Value> OnnxEmbedder::Run(
    const std::vector<int64_t> &input_ids,
    const std::vector<int64_t> &attention_mask,
    const std::vector<int64_t> &token_type_ids)
{
}
