#include <rkllm_tools.hpp>

#include <chrono>
#include <cstdio>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <thread>

// Read an entire text file into a string.
// Lines are joined with '\n'; at most one trailing newline is removed,
// so "a\nb\n" and "a\nb" both yield "a\nb".
// Throws std::runtime_error when the file cannot be opened.
std::string read_txt_file(std::filesystem::path const& p) {
  std::ifstream file(p, std::ios::in);
  if (!file.is_open()) {
      throw std::runtime_error("fail to open: " + p.string());
  }

  std::string content;
  for (std::string line; std::getline(file, line);) {
      content.append(line).push_back('\n');
  }
  // The stream closes itself via RAII when `file` goes out of scope.

  // Drop the final '\n' appended by the loop above, if any line was read.
  if (!content.empty() && content.back() == '\n') {
      content.pop_back();
  }
  return content;
}

// Run one synchronous inference round.
// @param infer        engine to run the prompt through
// @param prompt_txt   full prompt text fed to the model
// @param answer       out-param: the generated completion
// @param prompt_path  optional prompt-cache file path ("" = no cache file)
// @return total number of tokens produced by the run
int test_unit(rkllmt::RKLLMInfer &infer, std::string const &prompt_txt, std::string &answer, std::filesystem::path const &prompt_path = "") {
  int state = 0;
  // Fix: the user-data object must exist before it is used — the previous
  // nullptr-initialized shared_ptr was dereferenced by Pop() below, which is
  // a null dereference unless RunSync reassigns the pointer by reference
  // (TODO confirm against RunSync's signature; constructing it here is safe
  // in either case).
  auto user_data = std::make_shared<rkllmt::AtomicUserData>();
  infer.RunSync(user_data, prompt_txt, prompt_path);
  int total_token_count = user_data->Pop(answer, state);
  return total_token_count;
}

int main(int argc, char const *argv[]) {
  if (argc != 2) {
    std::cout << "Usage: compare_prompt path_of_model\n";
    return -1;
  }

  std::filesystem::path path = argv[1];
  rkllmt::RKLLMInfer::Param param;
  param.max_context_len = 64000; // in fact this size is large than the max length of op: E RKNN: [xxxx] meet unkown shape, op name: matmul_qk_rkllm_spilt_1, shape: 64, 128, 6464
  param.max_new_tokens = 64000;
  rkllmt::RKLLMInfer infer(path, param);

  auto prompt_txt ="User: " +  read_txt_file("../data/prompt_text.txt")+ "\nAssistant:";
  printf("Has Read context in ../data/prompt_text.txt\n");
  std::string answer;
  { 
    //! prepare prompt cache
    auto start = std::chrono::high_resolution_clock::now();
    int total_token_count = test_unit(infer, prompt_txt, answer, "../temp/prompt_cache.bin");
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1e-3f;
    printf("Has Save prompt cache in ../temp/prompt_cache.bin\n");
    float token_per_second = static_cast<float>(total_token_count) / duration;
    printf("%.2f token/s token %d duration %.4fs\n", token_per_second, total_token_count, duration);
    std::cout << answer << "\n";
    prompt_txt = prompt_txt + answer + "\nUser: 详细说明师祖为什么给美猴王取名\"孙悟空\"？\nAssistant:";
  }

  {
    //! base line ask after the first question
    auto start = std::chrono::high_resolution_clock::now();
    int total_token_count = test_unit(infer, prompt_txt, answer);
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1e-3f;
    float token_per_second = static_cast<float>(total_token_count) / duration;
    std::cout << answer << "\n";
    printf("%.2f token/s token %d duration %.4fs\n", token_per_second, total_token_count, duration);
  }

  {
    //! release and ask
    infer.ReleasePromptCache();
    printf("Has Release Prompt\n");
    auto start = std::chrono::high_resolution_clock::now();
    int total_token_count = test_unit(infer, prompt_txt, answer);
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1e-3f;
    float token_per_second = static_cast<float>(total_token_count) / duration;
    std::cout << answer << "\n";
    printf("%.2f token/s token %d duration %.4fs\n", token_per_second, total_token_count, duration);
  }


    
  {
    //! load and ask
    {
      auto start = std::chrono::high_resolution_clock::now();
      infer.LoadPromptCache("../temp/prompt_cache.bin");
      auto end = std::chrono::high_resolution_clock::now();
      auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1e-3f;
      printf("Has Load Prompt costs %.4fs\n", duration);
    }

    auto start = std::chrono::high_resolution_clock::now();
    int total_token_count = test_unit(infer, prompt_txt, answer);
    auto end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1e-3f;
    float token_per_second = static_cast<float>(total_token_count) / duration;
    std::cout << answer << "\n";
    printf("%.2f token/s token %d duration %.4fs\n", token_per_second, total_token_count, duration);
  }

  return 0;
}