#include "model/qwen2_hub.h"
#include "common/common.h"

#include <unistd.h>

#include <exception>
#include <fstream>

namespace pa
{

// Construct a hub for the given model type rooted at the `hub` directory;
// both are forwarded to ModelBase, which stores them (hub path is later read
// back via m_file).
Qwen2Hub::Qwen2Hub(const Type type, const std::string& hub): ModelBase(type, hub) {}
// No resources beyond the members; default destruction is sufficient.
Qwen2Hub::~Qwen2Hub() {}

// Files that must exist in a valid Qwen2 hub directory. is_valid() checks
// every entry; load() reads entries by index ([0] tokenizer_config.json,
// [1] tokenizer.json, [2] vocab.json), so the order of this list matters.
std::vector<std::string> Qwen2Hub::m_basic_files = {
    "tokenizer_config.json",
    "tokenizer.json",
    "vocab.json",
    "merges.txt",
    "model.safetensors"
};

/// Check that the hub directory exists, is readable, and contains every
/// file listed in m_basic_files (also readable).
/// @return true if all checks pass, false otherwise (details are logged).
bool Qwen2Hub::is_valid() const
{
    // The hub directory itself must exist and be readable.
    int ret = access(m_file.c_str(), F_OK | R_OK);
    if (ret != 0) {
        PA_LOGE("not find hub path or hub path is not readable: %s", m_file.c_str());
        return false;
    }

    // Every required model file must exist and be readable.
    for (const auto& file : m_basic_files) {
        // Build the full path once (the original rebuilt it for the log call).
        const std::string path = m_file + "/" + file;
        ret = access(path.c_str(), F_OK | R_OK);
        if (ret != 0) {
            PA_LOGE("not find file or file is not readable: %s", path.c_str());
            return false;
        }
    }
    return true;
}


bool Qwen2Hub::load()
{
    // TODO: catch exception
    std::ifstream tokenizer_config(m_file + "/" + m_basic_files[0]);
    m_tokenizer_config_json = Json::parse(tokenizer_config);

    m_config_tokenizer_class = m_tokenizer_config_json["tokenizer_class"];

    std::ifstream tokenizer(m_file + "/" + m_basic_files[1]);
    m_tokenizer_json = Json::parse(tokenizer);

    auto& pre_tokenizer = m_tokenizer_json["pre_tokenizer"]["pretokenizers"];
    m_pre_token_pattern = pre_tokenizer[0]["pattern"]["Regex"];

    std::ifstream vocab(m_file + "/" + m_basic_files[2]);
    m_vocab_json = Json::parse(vocab);

    // 添加token字符串
    m_token_strs = {};
    // 添加vocab
    for(const auto& item: m_vocab_json.items()) {
        m_token_strs.push_back(item.key());
    }
    // 添加added tokens
    m_added_token_strs = {};
    auto& added_tokens = m_tokenizer_json["added_tokens"];
    for (const auto& token: added_tokens) {
        m_added_token_strs.push_back(token["content"]);
    }

    return true;
}


} // namespace pa