// Date:   Sun Aug 24 11:57:43 PM 2025
// Mail:   lunar_ubuntu@qq.com
// Author: https://github.com/xiaoqixian

#pragma once

#include <map>
#include <memory>
#include <mutex>
#include <openvino/core/model.hpp>
#include <openvino/core/partial_shape.hpp>
#include <openvino/runtime/core.hpp>
#include <string>
#include <utility>

/**
 * @brief Thread-safe wrapper for creating OpenVINO models and compiled models.
 *
 * OpenVINO does not guarantee thread safety during model creation and
 * compilation: concurrent calls to ov::Core::read_model() or
 * ov::Core::compile_model() from multiple threads may lead to crashes or
 * undefined behavior.
 *
 * OVCore::create_model() therefore uses a mutex to serialize access, ensuring
 * that only one thread creates or compiles a model at a time.
 *
 * Steps performed by create_model():
 *   1. Reads the model from the specified file path.
 *   2. Applies any requested partial-shape reshaping.
 *   3. Compiles the model for the CPU device.
 *
 * @param model_path Path to the OpenVINO model (.xml or .onnx).
 * @param partial_shapes Optional map of input names to partial shapes for reshaping.
 * @return A pair containing the shared pointer to the ov::Model and the compiled ov::CompiledModel.
 */
class OVCore {
  // C++17 inline statics: the definitions live in the header itself, so this
  // file can be included from multiple translation units without requiring
  // out-of-class definitions in some .cpp (the plain `static` declarations
  // had none, which would fail at link time on first use).
  static inline ov::Core ov_core;
  static inline std::mutex mtx;
  static inline std::once_flag init_flag;

  OVCore() = default;

public:
  /// @brief Serialized (mutex-guarded) model read + reshape + CPU compile.
  /// @param model_path     Path to the OpenVINO model (.xml or .onnx).
  /// @param partial_shapes Optional map of input names to partial shapes;
  ///                       pass an empty map to keep the model's shapes.
  /// @return {model, compiled_model} pair for the CPU device.
  static auto create_model(
    std::string const& model_path,
    std::map<std::string,
    ov::PartialShape> const& partial_shapes
  ) -> std::pair<std::shared_ptr<ov::Model>, ov::CompiledModel>
  {
    std::scoped_lock lk(mtx);

    // One-time core configuration. std::call_once is exception-correct: the
    // previous hand-rolled bool flag was flipped *before* set_property(), so a
    // throwing set_property() would leave the core permanently unconfigured.
    std::call_once(init_flag, [] {
      ov_core.set_property("CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
    });

    auto model = ov_core.read_model(model_path);
    // Reshape only when the caller actually requested it — the map is
    // documented as optional.
    if (!partial_shapes.empty()) {
      model->reshape(partial_shapes);
    }
    auto compiled_model = ov_core.compile_model(model, "CPU");
    return {model, compiled_model};
  }
};
