// #include <rkllm_tools.hpp>
#include "../inc/rkllm_tools.hpp"
#include <fstream>
#include <rkllm.h> // hidden rkllm.h
#include <iostream>

namespace rkllmt {
constexpr size_t alignment = sizeof(void *);
constexpr size_t RKLLMInputSize            = ((sizeof(RKLLMInput) + alignment - 1) / alignment) * alignment;
constexpr size_t RKLLMPromptCacheParamSize = ((sizeof(RKLLMPromptCacheParam) + alignment - 1) / alignment) * alignment;
constexpr size_t RKLLMInferParamSize       = ((sizeof(RKLLMInferParam) + alignment - 1) / alignment) * alignment;
constexpr size_t RKLLMLoraParamSize        = ((sizeof(RKLLMLoraParam) + alignment - 1) / alignment) * alignment;

constexpr size_t RKLLMInputOffset            = 0llu;
constexpr size_t RKLLMPromptCacheParamOffset = RKLLMInputOffset + RKLLMInputSize;
constexpr size_t RKLLMInferParamOffset       = RKLLMPromptCacheParamOffset + RKLLMInferParamSize;
constexpr size_t RKLLMLoraParamOffset        = RKLLMInferParamOffset + RKLLMInferParamSize;

constexpr size_t RKLLMParamSize = ((sizeof(RKLLMParam) + alignment - 1) / alignment) * alignment;

AtomicUserData::AtomicUserData(size_t tpyical_str_size, std::filesystem::path const &prompt_cache_path, std::string const &input) 
   : m_state(0), m_queue_size(0), m_prompt_cache_path(prompt_cache_path), m_input(input) {
  m_data.reserve(tpyical_str_size);
  constexpr size_t BufferSize = RKLLMInputSize + RKLLMPromptCacheParamSize + RKLLMInferParamSize + RKLLMLoraParamSize;
  this->m_buffer.resize(BufferSize);
  std::fill(this->m_buffer.begin(), this->m_buffer.end(), 0);

  auto* rkll_input = reinterpret_cast<RKLLMInput *>(this->m_buffer.data() + RKLLMInputOffset);
  rkll_input->input_type = RKLLMInputType::RKLLM_INPUT_PROMPT;
  rkll_input->prompt_input = this->m_input.c_str();

  auto* rkll_cache_param = reinterpret_cast<RKLLMPromptCacheParam *>(this->m_buffer.data() + RKLLMPromptCacheParamOffset);
  if (this->m_prompt_cache_path.empty()) {
    rkll_cache_param->save_prompt_cache = 0;
    rkll_cache_param->prompt_cache_path = nullptr;
  } else {
    rkll_cache_param->save_prompt_cache = 1;
    rkll_cache_param->prompt_cache_path = this->m_prompt_cache_path.c_str();
  }
  
  auto* rkll_infer_param = reinterpret_cast<RKLLMInferParam *>(this->m_buffer.data() + RKLLMInferParamOffset);
  rkll_infer_param->mode = RKLLMInferMode::RKLLM_INFER_GENERATE;
  rkll_infer_param->prompt_cache_params = rkll_cache_param;
  // TODO lora 
  rkll_infer_param->lora_params = 0;
}

int AtomicUserData::Pop(std::string &string, int &state) {
  std::lock_guard lk(this->m_lock);
  string = this->m_data;
  state = this->m_state;
  this->m_data.clear();
  int ret = this->m_queue_size;
  this->m_queue_size = 0;
  return ret;
}

size_t AtomicUserData::Push(std::string const &append, int const state) {
  std::lock_guard lk(this->m_lock);
  size_t size = this->m_data.size();
  this->m_data += append;
  this->m_state = state;
  if (RKLLM_RUN_NORMAL == this->m_state) ++this->m_queue_size;
  return size;
}

void AtomicUserData::callback(int state, const char* text, int32_t token_id, const float* hidden_states, int embd_size, int num_tokens) {
  // This function copied from examples/DeepSeek-R1-Distill-Qwen-1.5B_Demo/deploy/src/llm_demo.cpp
  // printf("%s:%d\n", __FILE__, __LINE__);
  
  if (state == RKLLM_RUN_FINISH){
    this->Push("\n", state);
  } else if (state == RKLLM_RUN_ERROR) {
    this->Push("\\run error\n", state);
  } else if (state == RKLLM_RUN_GET_LAST_HIDDEN_LAYER) {
      /* ================================================================================================================
      若使用GET_LAST_HIDDEN_LAYER功能,callback接口会回传内存指针:last_hidden_layer,token数量:num_tokens与隐藏层大小:embd_size
      通过这三个参数可以取得last_hidden_layer中的数据
      注:需要在当前callback中获取,若未及时获取,下一次callback会将该指针释放
      ===============================================================================================================*/
      if (embd_size != 0 && num_tokens != 0) {
          int data_size = embd_size * num_tokens * sizeof(float);
          printf("\ndata_size:%d",data_size);
          std::ofstream outFile("last_hidden_layer.bin", std::ios::binary);
          if (outFile.is_open()) {
              outFile.write(reinterpret_cast<const char*>(hidden_states), data_size);
              outFile.close();
              std::cout << "Data saved to output.bin successfully!" << std::endl;
          } else {
              std::cerr << "Failed to open the file for writing!" << std::endl;
          }
      }
    this->Push("", state);
  } else if (state == RKLLM_RUN_NORMAL) {
    // std::cout << text;
    this->Push(text, state);
  }
// printf("%s:%d\n", __FILE__, __LINE__);

}

void _callback(RKLLMResult* result, void* userdata, LLMCallState state){
    auto atomic_user_data = reinterpret_cast<AtomicUserData *>(userdata);
    if (state == RKLLM_RUN_FINISH){
      atomic_user_data->Push("\n", state);
    } else if (state == RKLLM_RUN_ERROR) {
      atomic_user_data->Push("\\run error\n", state);
    } else {
      atomic_user_data->callback(state,
                                 result->text,
                                 result->token_id,
                                 result->last_hidden_layer.hidden_states,
                                 result->last_hidden_layer.embd_size,
                                 result->last_hidden_layer.num_tokens);
    }

  //   // This function copied from examples/DeepSeek-R1-Distill-Qwen-1.5B_Demo/deploy/src/llm_demo.cpp
  //   if (state == RKLLM_RUN_FINISH){
  //     atomic_user_data->Push("\n", state);
  //   } else if (state == RKLLM_RUN_ERROR) {
  //     atomic_user_data->Push("\\run error\n", state);
  //   } else if (state == RKLLM_RUN_GET_LAST_HIDDEN_LAYER) {
  //       /* ================================================================================================================
  //       若使用GET_LAST_HIDDEN_LAYER功能,callback接口会回传内存指针:last_hidden_layer,token数量:num_tokens与隐藏层大小:embd_size
  //       通过这三个参数可以取得last_hidden_layer中的数据
  //       注:需要在当前callback中获取,若未及时获取,下一次callback会将该指针释放
  //       ===============================================================================================================*/
  //       if (result->last_hidden_layer.embd_size != 0 && result->last_hidden_layer.num_tokens != 0) {
  //           int data_size = result->last_hidden_layer.embd_size * result->last_hidden_layer.num_tokens * sizeof(float);
  //           printf("\ndata_size:%d",data_size);
  //           std::ofstream outFile("last_hidden_layer.bin", std::ios::binary);
  //           if (outFile.is_open()) {
  //               outFile.write(reinterpret_cast<const char*>(result->last_hidden_layer.hidden_states), data_size);
  //               outFile.close();
  //               std::cout << "Data saved to output.bin successfully!" << std::endl;
  //           } else {
  //               std::cerr << "Failed to open the file for writing!" << std::endl;
  //           }
  //       }
  //     atomic_user_data->Push("", state);
  //   } else if (state == RKLLM_RUN_NORMAL) {
  //     // std::cout << text;
  //     atomic_user_data->Push(result->text, state);
  //   }
  // // printf("%s:%d\n", __FILE__, __LINE__);

  };

void callback(RKLLMResult* result, void* userdata, LLMCallState state) { 
  auto atomic_user_data = reinterpret_cast<AtomicUserData *>(userdata);
  
  // This function copied from examples/DeepSeek-R1-Distill-Qwen-1.5B_Demo/deploy/src/llm_demo.cpp
  if (state == RKLLM_RUN_FINISH){
    atomic_user_data->Push("\n", state);
  } else if (state == RKLLM_RUN_ERROR) {
    atomic_user_data->Push("\\run error\n", state);
  } else if (state == RKLLM_RUN_GET_LAST_HIDDEN_LAYER) {
      /* ================================================================================================================
      若使用GET_LAST_HIDDEN_LAYER功能,callback接口会回传内存指针:last_hidden_layer,token数量:num_tokens与隐藏层大小:embd_size
      通过这三个参数可以取得last_hidden_layer中的数据
      注:需要在当前callback中获取,若未及时获取,下一次callback会将该指针释放
      ===============================================================================================================*/
      if (result->last_hidden_layer.embd_size != 0 && result->last_hidden_layer.num_tokens != 0) {
          int data_size = result->last_hidden_layer.embd_size * result->last_hidden_layer.num_tokens * sizeof(float);
          printf("\ndata_size:%d",data_size);
          std::ofstream outFile("last_hidden_layer.bin", std::ios::binary);
          if (outFile.is_open()) {
              outFile.write(reinterpret_cast<const char*>(result->last_hidden_layer.hidden_states), data_size);
              outFile.close();
              std::cout << "Data saved to output.bin successfully!" << std::endl;
          } else {
              std::cerr << "Failed to open the file for writing!" << std::endl;
          }
      }
    atomic_user_data->Push("", state);
  } else if (state == RKLLM_RUN_NORMAL) {
    // std::cout << text;
    atomic_user_data->Push(result->text, state);
  }
 }

RKLLMInfer::RKLLMInfer(std::filesystem::path const &model_path, Param const &param)
   : m_model_path(model_path),
     m_lora_model_path(param.lora_model_path),
     m_prompt_cache_path(param.prompt_cache_path),
     m_max_context_length(std::max(param.max_context_len, param.max_new_tokens)) {
  this->m_buffer.resize(RKLLMParamSize);
  std::fill(this->m_buffer.begin(), this->m_buffer.end(), 0);
  auto *_param = reinterpret_cast<RKLLMParam *>(this->m_buffer.data());
  *_param = rkllm_createDefaultParam();
  _param->model_path = this->m_model_path.c_str();
  _param->max_context_len = param.max_context_len;
  _param->max_new_tokens = param.max_new_tokens;
  _param->top_k = param.top_k;
  _param->top_p = param.top_p;
  _param->temperature = param.temperature;
  _param->repeat_penalty = param.repeat_penalty;
  _param->frequency_penalty = param.frequency_penalty;
  _param->presence_penalty = param.presence_penalty;
  _param->mirostat = param.mirostat;
  _param->mirostat_tau = param.mirostat_tau;
  _param->mirostat_tau = param.mirostat_tau;
  _param->skip_special_token = param.skip_special_token;
  _param->extend_param.base_domain_id = 0;
  _param->is_async = param.is_sync; // TODO
  
  
  this->m_handle = std::make_unique<LLMHandle>(nullptr);
  int ret = rkllm_init(reinterpret_cast<LLMHandle*>(this->m_handle.get()), _param, _callback);
  if (!this->m_prompt_cache_path.empty()) {
    ret = rkllm_load_prompt_cache(*this->m_handle, this->m_prompt_cache_path.c_str());
  }
  // TODO rkllm_load_lora
  // if (!this->m_lora_model_path.empty()) {
  //   ret = rkllm_load_lora(*this->m_p_llhandle, this->m_prompt_cache_path.c_str());
  // }

}

RKLLMInfer::~RKLLMInfer() {
  if (this->m_handle && *this->m_handle){
    int ret = 0;
    if (ret = rkllm_is_running(*this->m_handle); ret == 0) {
      ret = rkllm_abort(*this->m_handle);
    }
    ret = rkllm_destroy(*this->m_handle);
    *this->m_handle = nullptr;
  }
  this->m_handle.reset();
}

void RKLLMInfer::RunAsync(std::shared_ptr<AtomicUserData> &user_data,
                          std::string               const &prompt,
                          std::filesystem::path     const &save_prompt_cache_path) {
  user_data = std::make_shared<AtomicUserData>(std::max(32, this->m_max_context_length), save_prompt_cache_path, prompt);
  auto *rkllm_input = reinterpret_cast<RKLLMInput *>(user_data->m_buffer.data() + RKLLMInputOffset);
  auto *infer_param = reinterpret_cast<RKLLMInferParam *>(user_data->m_buffer.data() + RKLLMInferParamOffset);
  int ret = rkllm_run_async(*this->m_handle, rkllm_input, infer_param, user_data.get());
}

void RKLLMInfer::RunSync(std::shared_ptr<AtomicUserData> &user_data,
  std::string               const &prompt,
  std::filesystem::path     const &save_prompt_cache_path) {
user_data = std::make_shared<AtomicUserData>(std::max(32, this->m_max_context_length), save_prompt_cache_path, prompt);
auto *rkllm_input = reinterpret_cast<RKLLMInput *>(user_data->m_buffer.data() + RKLLMInputOffset);
auto *infer_param = reinterpret_cast<RKLLMInferParam *>(user_data->m_buffer.data() + RKLLMInferParamOffset);
int ret = rkllm_run(*this->m_handle, rkllm_input, infer_param, user_data.get());
}

void RKLLMInfer::LoadPromptCache(std::filesystem::path const &prompt_cache_path) {
  int ret = rkllm_load_prompt_cache(*this->m_handle, prompt_cache_path.c_str());
}

void RKLLMInfer::ReleasePromptCache() {
  int ret = rkllm_release_prompt_cache(*this->m_handle);
}

void RKLLMInfer::Abort() {
  int ret = rkllm_abort(*this->m_handle);
}

bool RKLLMInfer::IsRun() {
  return rkllm_is_running(*this->m_handle) == 0;
}


}
