// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <kllm/core/km_model.h>
#include <kllm/utility/log.h>

namespace kllm {

    // Aggregated result of common_init_from_params(). On failure both
    // pointers remain nullptr; on success the caller takes ownership.
    struct LlamaModelInitResult {
        struct llama_model *model = nullptr;      // owned; release with llama_free_model()
        struct llama_context *context = nullptr;  // owned; release with llama_free() before the model
        std::vector<common_lora_adapter_container> lora_adapters;  // adapters loaded for `model`
    };

    // Translate project-level KMParams into the llama_model_params structure
    // consumed by llama_load_model_from_file(). Fields not assigned here keep
    // the llama.cpp defaults.
    llama_model_params common_model_params_to_llama(const KMParams & params) {
        llama_model_params result = llama_model_default_params();

        // -1 means "keep the library default" for the GPU layer count.
        if (params.n_gpu_layers != -1) {
            result.n_gpu_layers = params.n_gpu_layers;
        }

        result.rpc_servers   = params.rpc_servers.c_str();
        result.main_gpu      = params.main_gpu;
        result.split_mode    = params.split_mode;
        result.tensor_split  = params.tensor_split;
        result.use_mmap      = params.use_mmap;
        result.use_mlock     = params.use_mlock;
        result.check_tensors = params.check_tensors;

        if (!params.kv_overrides.empty()) {
            // The llama API requires the override array to end with a sentinel
            // entry whose key is the empty string.
            GGML_ASSERT(params.kv_overrides.back().key[0] == 0 && "KV overrides not terminated with empty key");
            result.kv_overrides = params.kv_overrides.data();
        } else {
            result.kv_overrides = NULL;
        }

        return result;
    }

    // Map a user-facing KV-cache type name (e.g. "f16", "q8_0") to the
    // corresponding ggml_type. Throws std::runtime_error for unknown names.
    static ggml_type kv_cache_type_from_str(const std::string & s) {
        struct TypeName {
            const char * name;
            ggml_type    type;
        };
        static const TypeName k_cache_types[] = {
            {"f32",    GGML_TYPE_F32},
            {"f16",    GGML_TYPE_F16},
            {"bf16",   GGML_TYPE_BF16},
            {"q8_0",   GGML_TYPE_Q8_0},
            {"q4_0",   GGML_TYPE_Q4_0},
            {"q4_1",   GGML_TYPE_Q4_1},
            {"iq4_nl", GGML_TYPE_IQ4_NL},
            {"q5_0",   GGML_TYPE_Q5_0},
            {"q5_1",   GGML_TYPE_Q5_1},
        };

        for (const auto & entry : k_cache_types) {
            if (s == entry.name) {
                return entry.type;
            }
        }

        throw std::runtime_error("Unsupported cache type: " + s);
    }

    struct llama_context_params common_context_params_to_llama(const KMParams & params) {
        auto cparams = llama_context_default_params();

        cparams.n_ctx             = params.n_ctx;
        cparams.n_seq_max         = params.n_parallel;
        cparams.n_batch           = params.n_batch;
        cparams.n_ubatch          = params.n_ubatch;
        cparams.n_threads         = params.cpuparams.n_threads;
        cparams.n_threads_batch   = params.cpuparams_batch.n_threads == -1 ?
                                    params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
        cparams.logits_all        = params.logits_all;
        cparams.embeddings        = params.embedding;
        cparams.rope_scaling_type = params.rope_scaling_type;
        cparams.rope_freq_base    = params.rope_freq_base;
        cparams.rope_freq_scale   = params.rope_freq_scale;
        cparams.yarn_ext_factor   = params.yarn_ext_factor;
        cparams.yarn_attn_factor  = params.yarn_attn_factor;
        cparams.yarn_beta_fast    = params.yarn_beta_fast;
        cparams.yarn_beta_slow    = params.yarn_beta_slow;
        cparams.yarn_orig_ctx     = params.yarn_orig_ctx;
        cparams.pooling_type      = params.pooling_type;
        cparams.attention_type    = params.attention_type;
        cparams.defrag_thold      = params.defrag_thold;
        cparams.cb_eval           = params.cb_eval;
        cparams.cb_eval_user_data = params.cb_eval_user_data;
        cparams.offload_kqv       = !params.no_kv_offload;
        cparams.flash_attn        = params.flash_attn;
        cparams.no_perf           = params.no_perf;

        if (params.reranking) {
            cparams.embeddings    = true;
            cparams.pooling_type  = LLAMA_POOLING_TYPE_RANK;
        }

        cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
        cparams.type_v = kv_cache_type_from_str(params.cache_type_v);

        return cparams;
    }

    // Load the model from disk, create an inference context, apply control
    // vectors and LoRA adapters, and optionally run a warmup pass.
    // On any failure every resource acquired so far is released and the
    // returned result has model == nullptr, which callers use to detect
    // failure. On success the caller owns result.model and result.context.
    LlamaModelInitResult common_init_from_params(KMParams & params) {
        LlamaModelInitResult iparams;
        auto mparams = common_model_params_to_llama(params);

        llama_model * model = nullptr;
        // NOTE(review): the HuggingFace / URL download paths are disabled in
        // this build; only loading from a local file is supported.
        /*
        if (!params.hf_repo.empty() && !params.hf_file.empty()) {
            model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
        } else if (!params.model_url.empty()) {
            model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
        } else {
            model = llama_load_model_from_file(params.model.c_str(), mparams);
        }
        */
        model = llama_load_model_from_file(params.model.c_str(), mparams);

        if (model == NULL) {
            LOG(ERROR)<<__func__<<": failed to load model '"<<params.model.c_str()<<"'";
            return iparams;
        }

        // Reranking needs BOS, EOS and SEP tokens to build query/document
        // pairs; refuse to continue if any of them is missing.
        if (params.reranking) {
            bool ok = true;

            if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
                LOG(WARNING)<<__func__<<": warning: model does not have a  BOS token, reranking will not work";
                ok = false;
            }

            if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
                LOG(WARNING)<<__func__<<": warning: model does not have an EOS token, reranking will not work";
                ok = false;
            }

            if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
                LOG(WARNING)<<__func__<<": warning: model does not have a  SEP token, reranking will not work";
                ok = false;
            }

            if (!ok) {
                llama_free_model(model);

                return iparams;
            }
        }

        auto cparams = common_context_params_to_llama(params);

        llama_context * lctx = llama_new_context_with_model(model, cparams);
        if (lctx == NULL) {
            LOG(ERROR)<<__func__ <<": failed to create context with model '"<<params.model.c_str()<<"'";
            llama_free_model(model);
            return iparams;
        }

        // Control vectors steer generation by adding learned directions to a
        // range of layers; non-positive bounds default to [1, n_layer].
        if (!params.control_vectors.empty()) {
            if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
            if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_n_layer(model);

            const auto cvec = common_control_vector_load(params.control_vectors);
            // n_embd == -1 signals that loading the control vector files failed.
            if (cvec.n_embd == -1) {
                llama_free(lctx);
                llama_free_model(model);

                return iparams;
            }

            int err = llama_control_vector_apply(lctx,
                                                 cvec.data.data(),
                                                 cvec.data.size(),
                                                 cvec.n_embd,
                                                 params.control_vector_layer_start,
                                                 params.control_vector_layer_end);
            if (err) {
                llama_free(lctx);
                llama_free_model(model);

                return iparams;
            }
        }

        // load and optionally apply lora adapters
        for (auto & la : params.lora_adapters) {
            common_lora_adapter_container loaded_la;
            loaded_la.path = la.path;
            loaded_la.scale = la.scale;
            loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str());
            if (loaded_la.adapter == nullptr) {
                LOG(ERROR)<<__func__<<": failed to apply lora adapter '"<<la.path.c_str()<<"'";
                llama_free(lctx);
                llama_free_model(model);
                return iparams;
            }
            iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters
        }
        if (!params.lora_init_without_apply) {
            common_lora_adapters_apply(lctx, iparams.lora_adapters);
        }

        // --ignore-eos is meaningless without an EOS token; drop it with a warning.
        if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
            LOG(WARNING)<<__func__<<": warning: model does not have an EOS token, ignoring --ignore-eos";
            params.sparams.ignore_eos = false;
        }

        // Warmup: run one encode/decode with a minimal token sequence so that
        // weights are paged in and kernels are compiled before the first real
        // request. The KV cache is cleared afterwards.
        if (params.warmup) {
            LOG(WARNING)<<__func__<<": warming up the model with an empty run - please wait ... (--no-warmup to disable)";

            std::vector<llama_token> tmp;
            llama_token bos = llama_token_bos(model);
            llama_token eos = llama_token_eos(model);
            // some models (e.g. T5) don't have a BOS token
            if (bos != LLAMA_TOKEN_NULL) {
                tmp.push_back(bos);
            }
            if (eos != LLAMA_TOKEN_NULL) {
                tmp.push_back(eos);
            }
            if (tmp.empty()) {
                // no special tokens at all — fall back to token id 0
                tmp.push_back(0);
            }

            if (llama_model_has_encoder(model)) {
                llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
                llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
                if (decoder_start_token_id == -1) {
                    decoder_start_token_id = bos;
                }
                // the decoder warmup starts from the decoder start token only
                tmp.clear();
                tmp.push_back(decoder_start_token_id);
            }
            if (llama_model_has_decoder(model)) {
                llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
            }
            llama_kv_cache_clear(lctx);
            llama_synchronize(lctx);
            llama_perf_context_reset(lctx);
        }

        iparams.model   = model;
        iparams.context = lctx;

        return iparams;
    }

    // Releases the context and model (see finalize()); safe if initialize()
    // was never called or already failed.
    KMModel::~KMModel() {
        finalize();
    }

    // Load the model described by `p` and cache derived properties.
    // Returns already_exists_error on double initialization and
    // unavailable_error when loading fails. `p` must outlive this object
    // because a pointer to it is retained in _params.
    turbo::Status KMModel::initialize(KMParams &p) {
        if (model != nullptr) {
            return turbo::already_exists_error("already initialized model for %s", p.model.c_str());
        }
        _params = &p;
        auto llama_init = common_init_from_params(p);

        model = llama_init.model;
        ctx = llama_init.context;
        loras = llama_init.lora_adapters;

        // common_init_from_params() reports failure by leaving model null
        // (it has already freed any partially acquired resources).
        if (model == nullptr) {
            return turbo::unavailable_error("failed to load model, '%s'\n", p.model.c_str());
        }

        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_add_bos_token(model);
        // llama_add_eos_token() < 0 means "unknown"; treating non-positive as
        // "has EOS" mirrors the upstream llama.cpp server behavior.
        has_eos_token = !llama_add_eos_token(model);

        return turbo::OkStatus();
    }

    // Release the context and then the model. The order matters: the context
    // references the model, so it must be freed first. Idempotent — both
    // members are nulled, so repeated calls are no-ops.
    void KMModel::finalize() {
        if (ctx != nullptr) {
            llama_free(ctx);
            ctx = nullptr;
        }
        if (model != nullptr) {
            llama_free_model(model);
            model = nullptr;
        }
    }

    // Drop all cached KV entries for every sequence in the context.
    void KMModel::kv_cache_clear() {
        SRV_DBG("%s", "clearing KV cache\n");

        // clear the entire KV cache
        llama_kv_cache_clear(ctx);
        // NOTE(review): presumably a "cache needs clearing" flag reset after
        // the clear — confirm against the declaration of clean_kv_cache.
        clean_kv_cache = false;
    }

    bool KMModel::validate_model_chat_template() const {
        llama_chat_message chat[] = {{"user", "test"}};

        const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);

        return res > 0;
    }

    std::vector<llama_token> KMModel::tokenize(const std::string &text, bool add_special, bool parse_special) const {
        // upper limit for the number of tokens
        int32_t n_tokens = text.length() + 2 * add_special;
        std::vector<llama_token> result(n_tokens);
        n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special,
                                  parse_special);
        if (n_tokens < 0) {
            result.resize(-n_tokens);
            int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special,
                                       parse_special);
            GGML_ASSERT(check == -n_tokens);
        } else {
            result.resize(n_tokens);
        }
        return result;
    }

}  // namespace kllm
