// Third-party libraries
#include "cxxopts.hpp"
#include "fmt/base.h"
#include "nlohmann/json.hpp"
#include "onnxruntime/onnxruntime_c_api.h"
#include "onnxruntime/onnxruntime_cxx_api.h"
#include "tokenizers_cpp.h"

// C++ standard library
#include <algorithm> // std::max_element
#include <cassert>
#include <cstdint>   // int64_t, SIZE_MAX
#include <cstdlib>   // exit
#include <fstream>
#include <iostream>
#include <numeric>   // std::iota
#include <ostream>
#include <string>
#include <vector>

#include <omp.h>

using namespace nlohmann;

// Read the entire file at `path` into a string, byte-for-byte (binary mode).
// On failure to open the file, prints an error to stderr and terminates the
// process with exit code 1.
std::string LoadBytesFromFile(const std::string& path)
{
    std::ifstream stream(path, std::ios::in | std::ios::binary);
    if (!stream)
    {
        std::cerr << "Cannot open " << path << std::endl;
        exit(1);
    }
    // Determine the file size by seeking to the end, then read everything
    // into a pre-sized buffer in a single call.
    stream.seekg(0, std::ios::end);
    const auto file_size = static_cast<size_t>(stream.tellg());
    stream.seekg(0, std::ios::beg);
    std::string contents(file_size, '\0');
    stream.read(&contents[0], file_size);
    return contents;
}

int main(int argc, char** argv)
{
    cxxopts::Options options("onnx interface", "interface for onnx model");
    options.allow_unrecognised_options();
    options.add_options()("t,tokenizer", "tokenizer",
                          cxxopts::value<std::string>());
    options.add_options()("i,input", "File name", cxxopts::value<std::string>());
    options.add_options()("m,model", "ONNX model file",
                          cxxopts::value<std::string>());
    options.add_options()("h,help", "interface for onnx model",
                          cxxopts::value<bool>()->default_value("false"));
    cxxopts::ParseResult result = options.parse(argc, argv);
    auto blob = LoadBytesFromFile(result["tokenizer"].as<std::string>());
    auto tok = tokenizers::Tokenizer::FromBlobJSON(blob);
    std::ifstream json_read(result["input"].as<std::string>());
    json datas;
    json_read >> datas;
    json_read.close();
    // 初始化ONNX Runtime环境
    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "ONNX_Model");
    Ort::SessionOptions session_options;
    session_options.SetIntraOpNumThreads(1);

    // 启用CUDA执行提供程序
    OrtCUDAProviderOptions cuda_options;
    cuda_options.device_id = 0; // 使用第一个GPU设备
    cuda_options.arena_extend_strategy = 0;
    cuda_options.gpu_mem_limit = SIZE_MAX;
    cuda_options.cudnn_conv_algo_search = OrtCudnnConvAlgoSearchExhaustive;
    cuda_options.do_copy_in_default_stream = 1;

    OrtStatus* status =
        OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
    if (status != nullptr)
    {
        std::cerr << "Failed to append CUDA execution provider: "
            << Ort::GetApi().GetErrorMessage(status) << std::endl;
        Ort::GetApi().ReleaseStatus(status);
        // 如果CUDA不可用，则继续使用CPU
    }

    // 加载ONNX模型
    auto model_path = result["model"].as<std::string>();
    Ort::Session session(env, model_path.c_str(), session_options);

    // 获取模型输入输出信息
    auto input_count = session.GetInputCount();
    auto output_count = session.GetOutputCount();

    // 获取实际的输入和输出名称
    const std::vector<std::string> input_names = session.GetInputNames();
    const std::vector<std::string> output_names = session.GetOutputNames();
    for (auto i : input_names)
    {
        fmt::println("{}", i);
    }
    for (auto i : output_names)
    {
        fmt::println("{}", i);
    }
    const int eos_token = tok->Encode("<|endoftext|>")[0]; // 假设eos_token_id为2，根据实际模型调整
    fmt::println("{}", eos_token);
    const int max_length = 500; // 最大生成长度

    datas[0]["input"] = "what is your name？";
    // 使用OpenMP并行处理数据
    // #pragma omp parallel for
    for (int i = 0; i < 1; i++)
    {
        auto encode = tok->Encode(datas[i]["input"].dump());
        std::cout << datas[i]["input"];
        std::vector<int64_t> current_input_ids(encode.begin(), encode.end());
        std::vector<int> generated_ids;

        while (generated_ids.size() < max_length &&
            (generated_ids.empty() || generated_ids.back() != eos_token))
        {
            // 构建动态输入
            int64_t seq_len = current_input_ids.size();
            std::vector<int64_t> input_shape = {1, seq_len};
            std::vector<int64_t> position_ids(seq_len);
            std::iota(position_ids.begin(), position_ids.end(), 0);
            std::vector<int64_t> attention_mask(seq_len, 1);

            // 创建输入张量
            Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
            Ort::Value input_tensor = Ort::Value::CreateTensor<int64_t>(
                memory_info, current_input_ids.data(), seq_len, input_shape.data(), 2);
            Ort::Value position_tensor = Ort::Value::CreateTensor<int64_t>(
                memory_info, position_ids.data(), seq_len, input_shape.data(), 2);
            Ort::Value attention_tensor = Ort::Value::CreateTensor<int64_t>(
                memory_info, attention_mask.data(), seq_len, input_shape.data(), 2);

            // 模型推理
            const char* input_names[] = {"input_ids", "attention_mask", "position_ids"};
            Ort::Value input_tensors[] = {
                std::move(input_tensor), std::move(attention_tensor), std::move(position_tensor)
            };
            const char* output_names[] = {"logits"};
            auto output_tensors = session.Run(Ort::RunOptions{nullptr}, input_names, input_tensors, 3, output_names, 1);

            // 解码logits
            float* logits = output_tensors[0].GetTensorMutableData<float>();
            int vocab_size = output_tensors[0].GetTensorTypeAndShapeInfo().GetShape()[1];
            int max_id = std::max_element(logits, logits + vocab_size) - logits;
            generated_ids.push_back(max_id);
            current_input_ids.push_back(max_id);
        }

        // 移除结束符并解码
        if (!generated_ids.empty() && generated_ids.back() == eos_token)
        {
            generated_ids.pop_back();
        }
        std::string decoded = tok->Decode(generated_ids);
        for (int& generated_id : generated_ids)
            fmt::print("{}\n", generated_id);
        fmt::println("样本{}的预测结果: {}", i, decoded);
    }
}
