#include <array>
#include <chrono>
#include <cstdint>
#include <exception>
#include <iostream>
#include <string>
#include <vector>

#include <onnxruntime_cxx_api.h>

int main(int argc, char* argv[]) {
    // 输入内容
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " model.onnx [cpu|gpu]" << std::endl;
        return -1;
    }

    std::string model_path = argv[1];
    bool use_cuda = (argc >= 3 && std::string(argv[2]) == "gpu");

    // 创建 ONNX Runtime 环境
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "embed_demo");
    Ort::SessionOptions session_options;
    session_options.SetIntraOpNumThreads(1);

    if (use_cuda) {
        std::cout << "Using GPU Execution Provider" << std::endl;
        OrtCUDAProviderOptions cuda_options;
        cuda_options.device_id = 0;
        cuda_options.arena_extend_strategy = 0;
        cuda_options.gpu_mem_limit = SIZE_MAX;
        cuda_options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
        cuda_options.do_copy_in_default_stream = 1;
        session_options.AppendExecutionProvider_CUDA(cuda_options);
    } else {
        std::cout << "Using CPU Execution Provider" << std::endl;
    }

    // 启动模型
    Ort::Session session(env, model_path.c_str(), session_options);

    // 分配器
    Ort::AllocatorWithDefaultOptions allocator;

    // 打印模型输入
    size_t num_input_nodes = session.GetInputCount();
    std::cout << "Model expects " << num_input_nodes << " inputs." << std::endl;
    for (size_t i = 0; i < num_input_nodes; i++) {
        auto name = session.GetInputNameAllocated(i, allocator);
        std::cout << "Input " << i << ": " << name.get() << std::endl;
    }

    // ===== 手动准备输入 =====
    // 假设 batch=1, sequence_length=7
    std::vector<int64_t> input_ids      = {101, 7632, 8024, 2024, 2017, 5440, 102};
    std::vector<int64_t> attention_mask = {1, 1, 1, 1, 1, 1, 1};
    std::vector<int64_t> token_type_ids = {0, 0, 0, 0, 0, 0, 0};

    std::vector<int64_t> input_shape = {1, (int64_t)input_ids.size()};
    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);

    Ort::Value input_ids_tensor = Ort::Value::CreateTensor<int64_t>(
        memory_info, input_ids.data(), input_ids.size(), input_shape.data(), input_shape.size());
    Ort::Value attention_mask_tensor = Ort::Value::CreateTensor<int64_t>(
        memory_info, attention_mask.data(), attention_mask.size(), input_shape.data(), input_shape.size());
    Ort::Value token_type_ids_tensor = Ort::Value::CreateTensor<int64_t>(
        memory_info, token_type_ids.data(), token_type_ids.size(), input_shape.data(), input_shape.size());

    std::array<const char*, 3> input_names = {"input_ids", "attention_mask", "token_type_ids"};
    std::array<Ort::Value, 3> ort_inputs = {
        std::move(input_ids_tensor),
        std::move(attention_mask_tensor),
        std::move(token_type_ids_tensor)
    };

    // ===== 输出准备 =====
    size_t num_output_nodes = session.GetOutputCount();
    std::vector<std::string> output_names_str;
    std::vector<const char*> output_node_names;

    for (size_t i = 0; i < num_output_nodes; i++) {
        auto name = session.GetOutputNameAllocated(i, allocator);
        output_names_str.push_back(name.get());
        std::cout << "Output " << i << ": " << name.get() << std::endl;
    }

    // 把 string 转成 const char* 数组
    for (auto &s : output_names_str) {
        output_node_names.push_back(s.c_str());
    }

    int num_runs = 100;
    double total_time = 0.0;
    for (int i=0; i<num_runs; i++){
        auto start = std::chrono::high_resolution_clock::now();

        // ===== 推理 =====
        auto output_tensors = session.Run(Ort::RunOptions{nullptr},
                                        input_names.data(),
                                        ort_inputs.data(),
                                        ort_inputs.size(),
                                        output_node_names.data(),
                                        output_node_names.size());

        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double, std::milli> elapsed = end - start;
        total_time += elapsed.count();

    }

    std::cout << "Average inference time over " << num_runs
          << " runs: " << (total_time / num_runs) << " ms" << std::endl;

    // 运行推理
    // auto output_tensors = session.Run(
    //     Ort::RunOptions{nullptr},
    //     input_names.data(),
    //     ort_inputs.data(),
    //     ort_inputs.size(),
    //     output_node_names.data(),
    //     output_node_names.size()
    // );

    // // 打印结果的 shape
    // for (size_t i = 0; i < output_tensors.size(); i++) {
    //     auto type_info = output_tensors[i].GetTensorTypeAndShapeInfo();
    //     auto shape = type_info.GetShape();
    //     std::cout << "Output " << i << " shape: [";
    //     for (size_t j = 0; j < shape.size(); j++) {
    //         std::cout << shape[j] << (j + 1 < shape.size() ? ", " : "");
    //     }
    //     std::cout << "]" << std::endl;
    // }

    return 0;
}
