#pragma once
#include <onnxruntime_cxx_api.h>

#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

class ForceClassifier
{
public:
  explicit ForceClassifier(const std::string& model_path)
  {
    env_ = std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "ForceSVM");
    session_options_ = std::make_unique<Ort::SessionOptions>();
    session_options_->SetIntraOpNumThreads(1);
    session_options_->SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

    session_ = std::make_unique<Ort::Session>(*env_, model_path.c_str(), *session_options_);
    allocator_ = std::make_unique<Ort::AllocatorWithDefaultOptions>();

    std::cout << "✅ ONNX 模型加载成功: " << model_path << std::endl;

    // 打印输出名方便调试
    size_t out_count = session_->GetOutputCount();
    for (size_t i = 0; i < out_count; ++i)
    {
      Ort::AllocatedStringPtr name = session_->GetOutputNameAllocated(i, *allocator_);
      std::cout << "🔹 输出[" << i << "]: " << name.get() << std::endl;
    }
  }

  int predict(double Fx, double Fy, double Fz)
  {
    // === 构造输入张量 ===
    std::vector<float> input_data = {(float)Fx, (float)Fy, (float)Fz};
    std::vector<int64_t> input_shape = {1, 3};
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);

    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
        memory_info, input_data.data(), input_data.size(),
        input_shape.data(), input_shape.size());

    // === 获取输入输出名 ===
    Ort::AllocatedStringPtr input_name_alloc = session_->GetInputNameAllocated(0, *allocator_);
    const char* input_name = input_name_alloc.get();

    Ort::AllocatedStringPtr output_name_alloc =
        session_->GetOutputNameAllocated(0, *allocator_); // output_label
    const char* output_name = output_name_alloc.get();

    // === 推理 ===
    std::vector<const char*> input_names{input_name};
    std::vector<const char*> output_names{output_name};

    auto output_tensors = session_->Run(
        Ort::RunOptions{nullptr},
        input_names.data(), &input_tensor, 1,
        output_names.data(), 1);

    // === 读取 int64 标签 ===
    int64_t* label_ptr = output_tensors.front().GetTensorMutableData<int64_t>();
    int predicted_label = static_cast<int>(label_ptr[0]);

    return predicted_label;
  }

private:
  std::unique_ptr<Ort::Env> env_;
  std::unique_ptr<Ort::SessionOptions> session_options_;
  std::unique_ptr<Ort::Session> session_;
  std::unique_ptr<Ort::AllocatorWithDefaultOptions> allocator_;
};
