#pragma once 

#include <cstdint>
#include <filesystem>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

namespace rkllmt {
class RKLLMInfer;
/// Thread-safe accumulator for text streamed out of an inference run.
///
/// One side (the inference callback / Push) appends generated chunks while a
/// consumer thread drains them via Pop; m_lock serialises both sides.
/// Copying is disabled because instances are shared across threads
/// (typically via std::shared_ptr — see RKLLMInfer::RunAsync/RunSync).
class AtomicUserData {
private:
  std::mutex  m_lock;        // guards m_data / m_state / m_queue_size
  std::string m_data;        // pending, not-yet-popped output text
  int         m_state;       // last state value recorded by Push/callback
  int         m_queue_size;  // NOTE(review): semantics not visible here — presumably a count of pending pushes; confirm in the .cpp

  std::filesystem::path const m_prompt_cache_path;  // prompt-cache file tied to this run (immutable)
  std::string           const m_input;              // original input prompt (immutable)
  std::vector<uint8_t>        m_buffer;             // scratch buffer — assumes hidden-state bytes; TODO confirm in the .cpp

public:
  /// @param typical_str_size  expected output size hint, presumably used to
  ///                          pre-reserve storage — confirm against the .cpp
  /// @param prompt_cache_path prompt-cache path associated with this run
  /// @param input             the input prompt text
  AtomicUserData(size_t typical_str_size, std::filesystem::path const &prompt_cache_path, std::string const &input);
  AtomicUserData(AtomicUserData const &) = delete;
  AtomicUserData& operator=(const AtomicUserData&) = delete;

  /// Drains pending text into @p string and reports the recorded state.
  /// @return status code — contract is defined in the .cpp, not visible here.
  int Pop(std::string &string, int &state);
  /// Appends @p append and records @p state under the lock.
  /// @return size value — contract is defined in the .cpp, not visible here.
  size_t Push(std::string const &append, int const state);

  // Conceptually private (note the friend below), but left public — likely
  // because the C-style callback trampoline in the .cpp is not a friend.
  // TODO(review): confirm before hiding it behind `private:`.
  void callback(int state, const char* text, int32_t token_id, const float* hidden_states = nullptr, int embd_size = -1, int num_tokens = -1);
  friend RKLLMInfer;
};


/// Wrapper around an opaque RKLLM inference handle.
///
/// Serialises access to the underlying runtime through m_lock. Not copyable
/// and — despite what the previous `= default`ed declarations suggested — not
/// movable either: std::mutex is immovable and four members are const, so the
/// defaulted move operations were implicitly deleted. They are now deleted
/// explicitly so the contract is visible at the declaration site.
class RKLLMInfer {
private:
  std::mutex                   m_lock;               // guards the underlying handle / run state
  std::filesystem::path  const m_model_path;         // model file given at construction
  std::filesystem::path  const m_lora_model_path;    // optional LoRA adapter path ("" = none)
  std::filesystem::path  const m_prompt_cache_path;  // optional prompt cache path ("" = none)
  std::vector<uint8_t>         m_buffer;             // scratch buffer — usage not visible here; see the .cpp
  // NOTE(review): unique_ptr<void*> owns a heap-allocated *slot* holding a
  // void*, not the handle itself — if the intent is "owning opaque handle",
  // the usual shape is a plain void* released in ~RKLLMInfer, or a unique_ptr
  // with a custom deleter. Verify against the .cpp before changing.
  std::unique_ptr<void*>       m_handle;
  int                    const m_max_context_length; // mirrors Param::max_context_len
public:
  /// Construction-time tuning knobs; defaults mirror common RKLLM settings.
  struct Param {
    std::filesystem::path  lora_model_path   = "";   // "" disables LoRA
    std::filesystem::path  prompt_cache_path = "";   // "" disables the prompt cache
    int   max_context_len   = 512;
    int   max_new_tokens    = -1;                    // -1 presumably means "no cap" — confirm in the .cpp
    int   top_k             = 1;
    float top_p             = 0.9;
    float temperature       = 0.8;
    float repeat_penalty    = 1.1;
    float frequency_penalty = 0.0;
    float presence_penalty  = 0.0;
    int   mirostat          = 0;                     // 0 = off
    float mirostat_tau      = 5.0;
    float mirostat_eta      = 0.1;
    bool  is_sync           = true;
    bool  skip_special_token= true;
  };

  /// @param model_path path to the RKLLM model file
  /// @param param      sampling / context configuration (see Param)
  RKLLMInfer(std::filesystem::path const &model_path, Param const &param);
  ~RKLLMInfer();

  // The defaulted versions were implicitly deleted (std::mutex member, const
  // members) — delete explicitly so misuse produces a clear diagnostic.
  RKLLMInfer(RKLLMInfer &&other) = delete;
  RKLLMInfer& operator=(RKLLMInfer &&other) = delete;

  RKLLMInfer(RKLLMInfer const &) = delete;
  RKLLMInfer& operator=(RKLLMInfer const &) = delete;

  /// Starts generation for @p prompt, streaming results into @p user_data.
  /// @param save_prompt_cache_path "" skips saving a prompt cache.
  void RunAsync(std::shared_ptr<AtomicUserData> &user_data,
                std::string               const &prompt,
                std::filesystem::path     const &save_prompt_cache_path = "");

  /// Blocking variant of RunAsync; returns when generation has finished.
  void RunSync(std::shared_ptr<AtomicUserData> &user_data,
               std::string               const &prompt,
               std::filesystem::path     const &save_prompt_cache_path = "");

  /// Loads a previously saved prompt cache from @p prompt_cache_path.
  void LoadPromptCache(std::filesystem::path const &prompt_cache_path);

  /// Releases any loaded prompt cache.
  void ReleasePromptCache();

  /// Aborts an in-flight generation.
  void Abort();

  /// @return true while a generation is in progress — confirm exact
  ///         semantics (and locking) in the .cpp.
  bool IsRun();
};

}