// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <kllm/core/km_context.h>
#include <kllm/utility/all.h>
#include <kllm/kai/proto_helper.h>
#include <kllm/core/batch.h>

namespace kllm {

    // Length of the shared leading run of identical tokens between two sequences.
    static size_t longest_common_prefix(const std::vector<llama_token> &a, const std::vector<llama_token> &b) {
        const size_t limit = std::min(a.size(), b.size());
        size_t n = 0;
        while (n < limit && a[n] == b[n]) {
            ++n;
        }
        return n;
    }

    static std::vector<llama_token>
    format_rerank(const struct llama_model *model, const std::vector<llama_token> &query, const std::vector<llama_token> &doc) {
        std::vector<llama_token> result;
        result.reserve(doc.size() + query.size() + 4);
        result.push_back(llama_token_bos(model));
        result.insert(result.end(), query.begin(), query.end());
        result.push_back(llama_token_eos(model));
        result.push_back(llama_token_sep(model));
        result.insert(result.end(), doc.begin(), doc.end());
        result.push_back(llama_token_eos(model));
        return result;
    }

    /**
     * NOTE: this comment documents the prompt-splitting/tokenization step used by
     * create_tasks_inference() further below — it does not describe initialize(),
     * which immediately follows it.
     *
     * Break the input "prompt" object into multiple prompts if needed, then tokenize them.
     * This supports these cases:
     * - "prompt": "string"
     * - "prompt": [12, 34, 56]
     * - "prompt": [12, 34, "string", 56, 78]
     * and multiple prompts (multi-tasks):
     * - "prompt": ["string1", "string2"]
     * - "prompt": ["string1", [12, 34, 56]]
     * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
     */
    // Initialize the serving context: load the model via km_model, carve the model
    // context into params.n_parallel equally sized slots, and allocate the shared
    // decode batch. Returns a non-OK status if model initialization fails.
    turbo::Status KMContext::initialize(const KMParams &pa) {
        params = pa;
        auto rs = km_model.initialize(params);
        if (!rs.ok()) {
            return rs;
        }
        // each slot gets an equal share of the model context window
        const int32_t n_ctx_slot = km_model.n_ctx / params.n_parallel;

        SRV_INF("initializing slots, n_slots = %d\n", params.n_parallel);

        for (int i = 0; i < params.n_parallel; i++) {
            server_slot slot;

            slot.id = i;
            slot.n_ctx = n_ctx_slot;
            slot.n_predict = params.n_predict;

            SLT_INF(slot, "new slot n_ctx_slot = %d\n", slot.n_ctx);

            // slots start with the server-wide sampling defaults; requests may override
            slot.sparams = params.sparams;

            // when a slot is released, pull the next deferred task into the queue
            slot.callback_on_release = [this](int) {
                queue_tasks.pop_deferred_task();
            };

            slot.reset();

            slots.push_back(slot);
        }

        // the first slot's settings serve as the defaults reported to clients
        default_generation_settings_for_props = slots.front();

        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
        // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
        {
            const int32_t n_batch = llama_n_batch(km_model.ctx);

            // only a single seq_id per token is needed
            batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
        }

        metrics.init();
        return turbo::OkStatus();
    }

    // Look up a slot by its fixed id; returns nullptr when no slot carries that id.
    // Linear scan — the slot list is small (params.n_parallel entries).
    server_slot *KMContext::get_slot_by_id(int id) {
        for (auto &candidate: slots) {
            if (candidate.id != id) {
                continue;
            }
            return &candidate;
        }
        return nullptr;
    }

    // Pick an idle slot for `task`. First preference: the idle slot whose cached
    // prompt shares the longest common subsequence with the incoming prompt and
    // exceeds the configured similarity threshold (maximizes KV-cache reuse).
    // Fallback: the least-recently-used idle slot. Returns nullptr when all slots
    // are busy.
    server_slot *KMContext::get_available_slot(const server_task &task) {
        server_slot *ret = nullptr;

        // find the slot that has at least n% prompt similarity
        if (ret == nullptr && slot_prompt_similarity != 0.0f) {
            int lcs_len = 0;
            float similarity = 0;

            for (server_slot &slot: slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                // skip the slot if it does not contain cached tokens
                if (slot.cache_tokens.empty()) {
                    continue;
                }

                // length of the Longest Common Subsequence between the current slot's prompt and the input prompt
                int cur_lcs_len = longest_common_subsequence(slot.cache_tokens, task.prompt_tokens);

                // fraction of the common subsequence length compared to the current slot's prompt length
                // (cast the denominator to float as well — the previous int cast obscured the intended
                // floating-point division)
                float cur_similarity = static_cast<float>(cur_lcs_len) / static_cast<float>(slot.cache_tokens.size());

                // select the current slot if the criteria match
                if (cur_lcs_len > lcs_len && cur_similarity > slot_prompt_similarity) {
                    lcs_len = cur_lcs_len;
                    similarity = cur_similarity;
                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                SLT_DBG(*ret, "selected slot by lcs similarity, lcs_len = %d, similarity = %f", lcs_len, similarity);
            }
        }

        // find the slot that has been least recently used
        if (ret == nullptr) {
            int64_t t_last = ggml_time_us();
            for (server_slot &slot: slots) {
                // skip the slot if it is not available
                if (slot.is_processing()) {
                    continue;
                }

                // select the current slot if the criteria match
                if (slot.t_last_used < t_last) {
                    t_last = slot.t_last_used;
                    ret = &slot;
                }
            }

            if (ret != nullptr) {
                // %lld + explicit cast: plain %ld is the wrong conversion for int64_t on LLP64 platforms
                SLT_DBG(*ret, "selected slot by lru, t_last = %lld", (long long) t_last);
            }
        }

        return ret;
    }

    // Configure `slot` from the incoming request and start it. Applies per-request
    // sampling overrides on top of server defaults, clamps n_predict to the server
    // limit, installs logit biases, and (re)creates the sampler.
    // Returns false (and reports the error) if sampler creation fails, e.g. on
    // invalid grammar.
    bool KMContext::launch_slot_with_task(server_slot &slot, const server_task &task) {
        // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
        auto default_sparams = params.sparams;
        const auto &req = task.req;

        if (req.has_oaicompat() && req.oaicompat()) {
            slot.oaicompat = true;
            slot.oaicompat_model = !req.has_model() ? std::string(DEFAULT_OAICOMPAT_MODEL) : req.model();
        } else {
            slot.oaicompat = false;
            slot.oaicompat_model = "";
        }
        slot.params = req.slot_params();
        slot.sparams.from_proto(req.sparam());

        // a DRY base below 1.0 is invalid; fall back to the server default
        if (slot.sparams.dry_base < 1.0f) {
            slot.sparams.dry_base = default_sparams.dry_base;
        }

        // sequence breakers for DRY
        {
            // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format
            // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39

            if (req.sparam().dry_sequence_breakers().empty()) {
                slot.sparams.dry_sequence_breakers = default_sparams.dry_sequence_breakers;
            }
        }
        if (slot.n_predict > 0 && slot.params.n_predict() > slot.n_predict) {
            // Might be better to reject the request with a 400 ?
            // BUGFIX: log the requested value before clamping — previously the
            // (already clamped) server limit was printed for both placeholders.
            SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d", slot.params.n_predict(), slot.n_predict);
            slot.params.set_n_predict(slot.n_predict);
        }

        {
            slot.sparams.logit_bias.clear();

            // when EOS must be ignored, push it to -inf so it is never sampled
            if (req.sparam().ignore_eos() && km_model.has_eos_token) {
                slot.sparams.logit_bias.push_back({llama_token_eos(km_model.model), -INFINITY});
            }

            const auto &logit_bias = req.sparam().logit_bias();
            // BUGFIX: process the request's biases when they are present — the
            // previous check was inverted (`empty()`), so the loop below never ran
            // and request-supplied logit biases were silently dropped.
            if (!logit_bias.empty()) {
                const int n_vocab = llama_n_vocab(km_model.model);
                for (const auto &el: logit_bias) {
                    // TODO: we may want to throw errors here, in case "el" is incorrect
                    float bias = el.bias();
                    if(el.has_token()) {
                        llama_token tok = el.token();
                        // ignore out-of-vocabulary token ids
                        if (tok >= 0 && tok < n_vocab) {
                            slot.sparams.logit_bias.push_back({tok, bias});
                        }
                    } else if(el.has_str_token()) {
                        // a string bias applies to every token the string tokenizes to
                        auto toks = km_model.tokenize(el.str_token(), false);
                        for (auto tok: toks) {
                            slot.sparams.logit_bias.push_back({tok, bias});
                        }
                    }
                }
            }
        }

        {
            // replace any sampler left over from the slot's previous task
            if (slot.smpl != nullptr) {
                common_sampler_free(slot.smpl);
            }

            slot.smpl = common_sampler_init(km_model.model, slot.sparams);
            if (slot.smpl == nullptr) {
                // for now, the only error that may happen here is invalid grammar
                send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST);
                return false;
            }
        }

        slot.state = SLOT_STATE_STARTED;

        SLT_INF(slot, "%s", "processing task\n");

        return true;
    }

    // Clear the underlying model's entire KV cache (delegates to km_model).
    void KMContext::kv_cache_clear() {
        km_model.kv_cache_clear();
    }

    // Post-process a freshly sampled token for `slot`: append it to the generated
    // text, handle stop words (full and partial matches), stream a partial response
    // if requested, and evaluate every stopping condition (token budget, time limit,
    // indentation limit, context capacity, EOS, infinite-generation guard).
    // Returns true if the slot should keep generating, false to stop.
    bool KMContext::process_token(completion_token_output &result, server_slot &slot) {
        // remember which tokens were sampled - used for repetition penalties during sampling
        const std::string token_str = token_to_piece(result.tok, params.special);
        slot.sampled = result.tok;

        // search stop word and delete it
        slot.generated_text += token_str;
        slot.has_next_token = true;

        // check if there is incomplete UTF-8 character at the end
        bool incomplete = false;
        for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) {
            unsigned char c = slot.generated_text[slot.generated_text.size() - i];
            if ((c & 0xC0) == 0x80) {
                // continuation byte: 10xxxxxx
                continue;
            }
            if ((c & 0xE0) == 0xC0) {
                // 2-byte character: 110xxxxx ...
                incomplete = i < 2;
            } else if ((c & 0xF0) == 0xE0) {
                // 3-byte character: 1110xxxx ...
                incomplete = i < 3;
            } else if ((c & 0xF8) == 0xF0) {
                // 4-byte character: 11110xxx ...
                incomplete = i < 4;
            }
            // else 1-byte character or invalid byte
            break;
        }

        if (!incomplete) {
            // `pos` is the boundary between text already sent and new text
            size_t pos = std::min(slot.n_sent_text, slot.generated_text.size());

            const std::string str_test = slot.generated_text.substr(pos);
            bool send_text = true;

            size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_FULL);
            if (stop_pos != std::string::npos) {
                // a full stop string matched: trim it (and anything after) from the output
                slot.generated_text.erase(
                        slot.generated_text.begin() + pos + stop_pos,
                        slot.generated_text.end());
                pos = std::min(slot.n_sent_text, slot.generated_text.size());
            } else if (slot.has_next_token) {
                // a partial stop-string match at the tail means we must hold the text back
                stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_PARTIAL);
                send_text = stop_pos == std::string::npos;
            }

            // check if there is any token to predict
            if (send_text) {
                // do not send the stop word in the response
                result.text_to_send = slot.generated_text.substr(pos, std::string::npos);
                slot.n_sent_text += result.text_to_send.size();
                // add the token to slot queue and cache
            }

            slot.add_token(result);
            if (slot.params.stream()) {
                send_partial_response(slot, result);
            }
        }

        if (incomplete) {
            // keep generating until the trailing UTF-8 sequence is complete
            slot.has_next_token = true;
        }

        // check the limits
        if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) {
            slot.stopped_limit = true;
            slot.has_next_token = false;

            SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict());
        }

        if (slot.has_new_line) {
            // if we have already seen a new line, we stop after a certain time limit
            if (slot.params.t_max_predict_ms() > 0 &&
                (ggml_time_us() - slot.t_start_generation > 1000.0f * slot.params.t_max_predict_ms())) {
                slot.stopped_limit = true;
                slot.has_next_token = false;

                SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded,
                        (int) slot.params.t_max_predict_ms());
            }

            // require that each new line has a whitespace prefix (i.e. indentation) of at least slot.params.n_indent
            if (slot.params.n_indent() > 0) {
                // check the current indentation
                // TODO: improve by not doing it more than once for each new line
                if (slot.last_nl_pos > 0) {
                    size_t pos = slot.last_nl_pos;

                    int n_indent = 0;
                    while (pos < slot.generated_text.size() &&
                           (slot.generated_text[pos] == ' ' || slot.generated_text[pos] == '\t')) {
                        n_indent++;
                        pos++;
                    }

                    if (pos < slot.generated_text.size() && n_indent < slot.params.n_indent()) {
                        slot.stopped_limit = true;
                        slot.has_next_token = false;

                        // cut the last line
                        slot.generated_text.erase(pos, std::string::npos);

                        SLT_DBG(slot, "stopped by indentation limit, n_decoded = %d, n_indent = %d\n", slot.n_decoded,
                                n_indent);
                    }
                }

                // find the next new line
                {
                    const size_t pos = slot.generated_text.find('\n', slot.last_nl_pos);

                    if (pos != std::string::npos) {
                        slot.last_nl_pos = pos + 1;
                    }
                }
            }
        }

        // check if there is a new line in the generated text
        if (result.text_to_send.find('\n') != std::string::npos) {
            slot.has_new_line = true;
        }

        // if context shift is disabled, we stop when it reaches the context limit
        if (slot.n_past >= slot.n_ctx) {
            slot.truncated = true;
            slot.stopped_limit = true;
            slot.has_next_token = false;

            // BUGFIX: argument order now matches the format string — previously
            // n_decoded was printed as n_past and n_past as n_decoded.
            SLT_DBG(slot,
                    "stopped due to running out of context capacity, n_past = %d, n_prompt_tokens = %d, n_decoded = %d, n_ctx = %d\n",
                    slot.n_past, slot.n_prompt_tokens, slot.n_decoded, slot.n_ctx);
        }

        if (llama_token_is_eog(km_model.model, result.tok)) {
            slot.stopped_eos = true;
            slot.has_next_token = false;

            SLT_DBG(slot, "%s", "stopped by EOS\n");
        }

        const auto n_ctx_train = llama_n_ctx_train(km_model.model);

        // guard against EOS-less infinite generation when n_predict is unlimited
        if (slot.params.n_predict() < 1 && slot.n_predict < 1 && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
            slot.truncated = true;
            slot.stopped_limit = true;
            slot.has_next_token = false; // stop prediction

            SLT_WRN(slot,
                    "n_predict (%d) is set for infinite generation. "
                    "Limiting generated tokens to n_ctx_train (%d) to avoid EOS-less generation infinite loop\n",
                    slot.params.n_predict(), n_ctx_train);
        }

        SLT_DBG(slot, "n_decoded = %d, n_remaining = %d, next token: %5d '%s'\n", slot.n_decoded, slot.n_remaining,
                result.tok, token_str.c_str());

        return slot.has_next_token; // continue
    }



    // Report an error for a queued task (routes by the task's id).
    void KMContext::send_error(const server_task &task, const std::string &error, const enum ErrorType type) {
        send_error(task.id, error, type);
    }

    // Report an error for a running slot (routes by the slot's current task id).
    void KMContext::send_error(const server_slot &slot, const std::string &error, const enum ErrorType type) {
        send_error(slot.id_task, error, type);
    }

    void KMContext::send_error(const int id_task, const std::string &error, const enum ErrorType type) {
        SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str());

        TaskResult res;
        res.id = id_task;
        res.stop = false;
        res.error = true;
        res.errcode = type;
        res.errmsg = error;
        queue_results.send(res);
    }

    // Publish a streaming (non-final) chunk for `slot` containing the newly
    // produced text `tkn.text_to_send`, plus per-token probabilities when the
    // request asked for them (sparams.n_probs > 0).
    void KMContext::send_partial_response(server_slot &slot, completion_token_output tkn) {
        TaskResult res;
        res.id = slot.id_task;
        res.error = false;
        res.slot = slot;
        res.stop = false;
        // snapshot the slot state into the protobuf payload
        auto ss = trans_proto(slot);
        ss.set_prompt(detokenize(slot.prompt_tokens));
        *res.completions.mutable_slot() =ss;
        res.completions.set_stop(false);
        res.completions.set_content(tkn.text_to_send);
        res.completions.set_multimodal(false);
        if (slot.sparams.n_probs > 0) {
            // re-tokenize the outgoing text to know how many token-probability
            // entries correspond to this chunk
            const std::vector<llama_token> to_send_toks = km_model.tokenize(tkn.text_to_send, false);
            // window [probs_pos, probs_stop_pos) of not-yet-sent probability entries
            const size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size());
            const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(),
                                                   slot.generated_token_probs.size());

            std::vector<completion_token_output> probs_output;
            if (probs_pos < probs_stop_pos) {
                probs_output = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin() + probs_pos,
                        slot.generated_token_probs.begin() + probs_stop_pos);
            }
            slot.n_sent_token_probs = probs_stop_pos;
            // copy the selected entries into the repeated protobuf field
            auto mcp = res.completions.mutable_completion_probabilities();
            for(auto &it : probs_output) {
                auto ptr = mcp->Add();
                ptr->set_text_to_send(it.text_to_send);
                ptr->set_tok(it.tok);
                for(auto &sit : it.probs) {
                    auto sptr = ptr->mutable_probs()->Add();
                    sptr->set_tok(sit.tok);
                    sptr->set_prob(sit.prob);
                }
            }
        }
        queue_results.send(res);
    }

    // Publish the embedding result for `slot`: for each batch token that carries
    // logits and belongs to this slot's sequence, fetch the sequence embedding
    // (falling back to the per-token embedding), normalize it, and append it to
    // the result. On failure a zero vector is emitted for that entry.
    void KMContext::send_embedding(const server_slot &slot, const llama_batch &batch) {
        TaskResult res;
        res.id = slot.id_task;
        res.error = false;
        res.stop = true;

        const int n_embd = llama_n_embd(km_model.model);

        std::vector<float> embd_res(n_embd, 0.0f);
        // placeholder emitted when embeddings cannot be retrieved
        static std::vector<float> zeros(n_embd, 0.0f);
        LOG(INFO)<<"batch.n_tokens: "<<batch.n_tokens;
        for (int i = 0; i < batch.n_tokens; ++i) {
            EmbeddingObject obj;
            // only tokens with logits enabled and matching this slot's sequence count
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                LOG(INFO)<<"skip";
                continue;
            }

            // prefer the pooled per-sequence embedding; fall back to the i-th token's
            const float *embd = llama_get_embeddings_seq(km_model.ctx, batch.seq_id[i][0]);
            if (embd == nullptr) {
                embd = llama_get_embeddings_ith(km_model.ctx, i);
            }

            if (embd == nullptr) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i],
                        batch.seq_id[i][0]);
                obj.set_index(slot.index);
                obj.mutable_embedding()->Assign(zeros.begin(),zeros.end());
                *res.embeddings.mutable_embedding()->Add() = std::move(obj);

                continue;
            }

            common_embd_normalize(embd, embd_res.data(), n_embd);
            obj.set_index(slot.index);
            obj.mutable_embedding()->Assign(embd_res.begin(),embd_res.end());
            *res.embeddings.mutable_embedding()->Add() = std::move(obj);
        }

        SLT_DBG(slot, "%s", "sending embeddings\n");

        queue_results.send(res);
    }

    // Publish the rerank score for `slot`: the score is the first component of the
    // sequence embedding for this slot's sequence; -1e6 is used as the sentinel on
    // failure.
    // NOTE(review): the score/index are overwritten on each loop iteration —
    // presumably at most one batch token per sequence carries logits for rerank
    // models, so only one write happens in practice; confirm against the batching
    // code.
    void KMContext::send_rerank(const server_slot &slot, const llama_batch &batch) {
        TaskResult res;
        res.id = slot.id_task;
        res.error = false;
        res.stop = true;

        for (int i = 0; i < batch.n_tokens; ++i) {
            // only tokens with logits enabled and matching this slot's sequence count
            if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) {
                continue;
            }

            // prefer the pooled per-sequence embedding; fall back to the i-th token's
            const float *embd = llama_get_embeddings_seq(km_model.ctx, batch.seq_id[i][0]);
            if (embd == nullptr) {
                embd = llama_get_embeddings_ith(km_model.ctx, i);
            }

            if (embd == nullptr) {
                SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i],
                        batch.seq_id[i][0]);
                // sentinel score so the failure is visible to the client
                res.reranks.set_score(-1e6);
                res.reranks.set_index( slot.index);
                continue;
            }
            res.reranks.set_score(embd[0]);
            res.reranks.set_index( slot.index);
        }

        SLT_DBG(slot, "sending rerank result, res = '%s'\n", "/*res.data.dump().c_str()*/");

        queue_results.send(res);
    }

    // Build the token sequence for a fill-in-the-middle (infill) request:
    // [extra repo-level context][FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]
    // (order of prefix/suffix halves is swapped when spm_infill is set).
    // The prefix/suffix are budgeted to fit in one batch (ratio ~3:1) and the
    // remaining context is filled with extra chunks from the request.
    std::vector<llama_token> KMContext::format_infill(
            const KaiRequest &req,
            const int n_batch,
            const int n_predict,
            const int n_ctx,
            const bool spm_infill,
            const std::vector<llama_token> &tokens_prompt
    ) {
        // TODO: optimize this block by reducing memory allocations and movement

        // use FIM repo-level pattern:
        // ref: https://arxiv.org/pdf/2409.12186
        //
        // [FIM_REP]myproject
        // [FIM_SEP]filename0
        // extra chunk 0
        // [FIM_SEP]filename1
        // extra chunk 1
        // ...
        // [FIM_SEP]filename
        // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
        //
        std::vector<llama_token> extra_tokens;
        extra_tokens.reserve(n_ctx);

        auto model = llama_get_model(km_model.ctx);
        auto tokens_prefix = tokenize_mixed(req.infill_request().input_prefix(), false, false);
        auto tokens_suffix = tokenize_mixed(req.infill_request().input_suffix(), false, false);

        if (llama_token_fim_rep(model) != LLAMA_TOKEN_NULL) {
            // TODO: make project name an input
            static const auto k_fim_repo = tokenize("myproject\n", false, false);

            extra_tokens.push_back(llama_token_fim_rep(model));
            extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
        }
        for (const auto &chunk: req.infill_request().input_extra()) {
            // { "text": string, "filename": string }
            const std::string text = chunk.text();
            const std::string filename = chunk.filename();

            if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
                const auto k_fim_file = tokenize(filename + "\n", false, false);

                extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
                extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
            } else {
                // chunk separator in binary form to avoid confusing the AI
                static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70,
                                                          0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
                static const auto k_chunk_prefix_tokens = tokenize( k_chunk_prefix_str, false, false);

                extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
            }

            const auto chunk_tokens = tokenize(text, false, false);
            extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
        }

        if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
            // TODO: current filename
            static const auto k_fim_file = tokenize("filename\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        }

        // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
        const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3 * (n_batch / 4));
        const int n_suffix_take = std::min<int>(tokens_suffix.size(),
                                                std::max<int>(0, (n_batch / 4) - (2 + tokens_prompt.size())));

        SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take,
                (n_prefix_take + n_suffix_take));

        // fill the rest of the context with extra chunks
        const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2 * n_predict),
                                               extra_tokens.size());

        // keep only the last n_prefix_take prefix tokens and first n_suffix_take suffix tokens
        tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
        tokens_suffix.resize(n_suffix_take);

        tokens_prefix.insert(tokens_prefix.begin(), llama_token_fim_pre(model));
        tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end());
        tokens_suffix.insert(tokens_suffix.begin(), llama_token_fim_suf(model));

        // SPM (suffix-prefix-middle) models take the suffix half first
        auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
        auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

        if (llama_add_bos_token(model)) {
            embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
        }

        SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

        // put the extra context before the FIM prefix
        embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

        embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
        embd_inp.push_back(llama_token_fim_mid(model));

        return embd_inp;
    }

    // Split the request's prompt(s) into one server_task per prompt. For rerank,
    // prompts[0] is the query and each remaining prompt is a document paired with
    // it; for infill, each prompt is wrapped with FIM tokens; otherwise each
    // tokenized prompt becomes its own task.
    std::vector<server_task> KMContext::create_tasks_inference(const KaiRequest &req, ServerTaskInfType inf_type, const KaiPrompts&prompts) {
        std::vector<server_task> tasks;
        // helper: wrap one tokenized prompt into a task (moves prompt_tokens out)
        auto create_task = [&](const KaiRequest &task_data, std::vector<llama_token> &prompt_tokens, int32_t index) {
            SRV_DBG("create task, n_tokens = %d\n", (int) prompt_tokens.size());
            server_task task;
            task.index = index;
            task.id = queue_tasks.get_new_id();
            task.inf_type = inf_type;
            task.type = SERVER_TASK_TYPE_INFERENCE;
            task.req = task_data;
            task.prompt_tokens = std::move(prompt_tokens);
            tasks.push_back(std::move(task));
        };

        KaiRequest task_req = req;
        // because llama_tokenize api is thread-safe, we can tokenize the prompt from HTTP thread
        bool add_special = inf_type != SERVER_TASK_INF_TYPE_RERANK && inf_type != SERVER_TASK_INF_TYPE_INFILL;
        turbo::Status rs;
        // NOTE(review): `rs` is never checked after this call — a tokenization
        // failure appears to be silently ignored; confirm whether it should abort
        // task creation.
        std::vector<std::vector<llama_token>> tokenized_prompts = tokenize_input_prompts(prompts, add_special, true, rs);

        switch (inf_type) {
            case SERVER_TASK_INF_TYPE_RERANK: {
                // prompts[0] is the question
                // the rest are the answers/documents
                GGML_ASSERT(tokenized_prompts.size() > 1);
                SRV_DBG("creating rerank tasks, n_prompts = %d\n", (int) tokenized_prompts.size() - 1);
                for (size_t i = 1; i < tokenized_prompts.size(); i++) {
                    auto tokens = format_rerank(km_model.model, tokenized_prompts[0], tokenized_prompts[i]);
                    create_task(task_req, tokens, i - 1);
                }
            }
                break;
            case SERVER_TASK_INF_TYPE_INFILL: {
                SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
                for (size_t i = 0; i < tokenized_prompts.size(); i++) {
                    auto tokens = format_infill(
                            task_req,
                            params.n_batch,
                            params.n_predict,
                            slots[0].n_ctx, // TODO: there should be a better way
                            params.spm_infill,
                            tokenized_prompts[i]
                    );
                    create_task(task_req, tokens, i);
                }
            }
                break;
            default: {
                SRV_DBG("creating multi-prompt tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
                for (size_t i = 0; i < tokenized_prompts.size(); i++) {
                    create_task(task_req, tokenized_prompts[i], i);
                }
            }
        }

        return tasks;
    }

    void KMContext::cancel_tasks(const std::unordered_set<int> &id_tasks) {
        std::vector<server_task> cancel_tasks;
        cancel_tasks.reserve(id_tasks.size());
        for (const auto &id_task: id_tasks) {
            SRV_WRN("cancel task, id_task = %d\n", id_task);

            server_task task;
            task.type = SERVER_TASK_TYPE_CANCEL;
            task.id_target = id_task;
            cancel_tasks.push_back(task);
            queue_results.remove_waiting_task_id(id_task);
        }
        // push to beginning of the queue, so it has highest priority
        queue_tasks.post(cancel_tasks, true);
    }

    // Collect one result per task id, ordered by each result's slot index, then
    // hand the full set to `result_handler`. On the first error every remaining
    // task is cancelled and `error_handler` is invoked instead.
    void KMContext::receive_cmpl_results(
            const std::unordered_set<int> & id_tasks,
            const std::function<void(std::vector<TaskResult>&)> & result_handler,
            const std::function<void(const turbo::Status&)> & error_handler) {
        // TODO: currently, there is no way to detect the client has cancelled the request
        std::vector<TaskResult> collected(id_tasks.size());
        size_t remaining = id_tasks.size();
        while (remaining > 0) {
            --remaining;
            TaskResult res = queue_results.recv(id_tasks);

            if (res.error) {
                error_handler(turbo::Status(static_cast<turbo::StatusCode>(res.errcode), res.errmsg));
                cancel_tasks(id_tasks);
                return;
            }
            // results arrive out of order; place each one at its slot's index
            const size_t idx = res.slot.index;
            GGML_ASSERT(idx < collected.size() && "index out of range");

            collected[idx] = res;
        }
        result_handler(collected);
    }

    // Stream results for `id_tasks` to `result_handler` as they arrive. The loop
    // ends when the handler declines a result (consumer aborted), on the first
    // error, or once every task has delivered its final (stop) result. In the
    // first two cases the remaining tasks are cancelled.
    // (Leftover debug statements `LOG(WARNING)<<1/2/3` removed — they logged bare
    // integers with no diagnostic value.)
    void KMContext::receive_cmpl_results_stream(
            const std::unordered_set<int> &id_tasks,
            const std::function<bool(TaskResult&)> &result_handler,
            const std::function<void(const turbo::Status &status)> &error_handler) {
        size_t n_finished = 0;
        while (true) {
            TaskResult result = queue_results.recv(id_tasks);
            if (!result_handler(result)) {
                // consumer no longer wants results (e.g. client disconnected)
                cancel_tasks(id_tasks);
                break;
            }

            if (result.error) {
                error_handler(turbo::Status(static_cast<turbo::StatusCode>(result.errcode), result.errmsg));
                cancel_tasks(id_tasks);
                break;
            }

            if (result.stop) {
                // exit only after every task has reported its final result
                if (++n_finished == id_tasks.size()) {
                    break;
                }
            }
        }
    }

    // Serialize the runtime state of a slot into its KaiSlotState protobuf:
    // configuration, task binding, progress counters, stop flags and timing
    // information. This is the canonical slot -> proto conversion.
    KaiSlotState KMContext::trans_proto(const server_slot&slot) const {
        KaiSlotState state;
        // configuration / sampling parameters
        state.set_n_ctx(slot.n_ctx);
        state.set_n_predict(slot.n_predict);
        state.set_model_alias(params.model_alias);
        // seed is only meaningful once a sampler has been created for the slot
        state.set_seed_cur( slot.smpl ? common_sampler_get_seed(slot.smpl) : 0);
        state.mutable_antiprompt()->Assign(slot.params.antiprompt().begin(), slot.params.antiprompt().end());
        state.set_max_tokens(slot.params.n_predict());
        state.set_n_keep(slot.params.n_keep());
        state.set_n_discard(slot.params.n_discard());

        // identity and live processing state
        state.set_id(slot.id);
        state.set_id_task(slot.id_task);
        state.set_is_processing(slot.is_processing());
        // prompt is stored as tokens on the slot; convert back to text for the proto
        state.set_prompt(detokenize(slot.prompt_tokens));
        state.set_has_next_token(slot.has_next_token);
        state.set_has_new_line(slot.has_new_line);
        state.set_n_remain(slot.n_remaining);
        state.set_n_decoded(slot.n_decoded);
        // stop condition flags
        state.set_stopped_eos(slot.stopped_eos);
        state.set_stopped_word(slot.stopped_word);
        state.set_stopped_limit(slot.stopped_limit);
        state.set_stopping_word(slot.stopping_word);
        state.set_index(slot.index);
        // cache/progress counters and timings
        state.set_tokens_cached(slot.n_past);
        state.set_tokens_evaluated(slot.n_prompt_tokens);
        state.set_truncated(slot.truncated);
        state.set_n_prompt_tokens_processed(slot.n_prompt_tokens_processed);
        state.set_t_prompt_processing(slot.t_prompt_processing);
        state.set_t_token_generation(slot.t_token_generation);
        return state;
    }

    // Execute a single task popped from the task queue. Each case either
    // binds the task to a slot (INFERENCE), mutates slot/KV-cache state
    // (CANCEL, SLOT_SAVE/RESTORE/ERASE, SET_LORA) or publishes a TaskResult
    // on queue_results (METRICS and the slot operations).
    void KMContext::process_single_task(server_task task) {
        switch (task.type) {
            case SERVER_TASK_TYPE_INFERENCE: {
                const int id_slot = task.req.id_slot();

                // a specific slot was requested; otherwise pick any free one
                server_slot *slot = id_slot != -1 ? get_slot_by_id(id_slot) : get_available_slot(task);

                if (slot == nullptr) {
                    // if no slot is available, we defer this task for processing later
                    SRV_DBG("no slot is available, defer task, id_task = %d\n", task.id);
                    queue_tasks.defer(task);
                    break;
                }
                if (slot->is_processing()) {
                    // if requested slot is unavailable, we defer this task for processing later
                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                    queue_tasks.defer(task);
                    break;
                }

                slot->reset();

                // bind the task to the slot; prompt tokens are moved, the rest
                // of `task` stays valid for launch_slot_with_task below
                slot->id_task = task.id;
                slot->inf_type = task.inf_type;
                slot->index = task.index;
                slot->prompt_tokens = std::move(task.prompt_tokens);

                if (!launch_slot_with_task(*slot, task)) {
                    SRV_ERR("failed to launch slot with task, id_task = %d\n", task.id);
                    break;
                }
            }
                break;
            case SERVER_TASK_TYPE_CANCEL: {
                // release slot linked with the task id
                for (auto &slot: slots) {
                    if (slot.id_task == task.id_target) {
                        slot.release();
                        break;
                    }
                }
            }
                break;
            case SERVER_TASK_TYPE_NEXT_RESPONSE: {
                // do nothing — posted only to keep the task loop spinning
            }
                break;
            case SERVER_TASK_TYPE_METRICS: {
                int n_idle_slots = 0;
                int n_processing_slots = 0;
                TaskResult res;
                for (server_slot &slot: slots) {
                    // reuse the canonical slot -> proto conversion instead of
                    // duplicating the field-by-field copy here; the previous
                    // inline copy had already drifted (it was missing index,
                    // tokens_cached/evaluated, truncated and the timing fields)
                    KaiSlotState state = trans_proto(slot);
                    if (slot.is_processing()) {
                        n_processing_slots++;
                    } else {
                        n_idle_slots++;
                    }
                    res.slot_state.push_back(std::move(state));
                }
                SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots);

                // snapshot the aggregated metrics and augment with live counts
                res.id = task.id;
                res.stop = true;
                res.error = false;
                res.metrics = metrics.metrics;
                res.metrics.set_n_idle_slots(n_idle_slots);
                res.metrics.set_n_processing_slots(n_processing_slots);
                res.metrics.set_deferred(queue_tasks.queue_tasks_deferred.size());
                res.metrics.set_kv_cache_tokens_count(llama_get_kv_cache_token_count(km_model.ctx));
                res.metrics.set_kv_cache_used_cells(llama_get_kv_cache_used_cells(km_model.ctx));
                if (task.reset_bucket) {
                    metrics.reset_bucket();
                }
                queue_results.send(res);
            }
                break;
            case SERVER_TASK_TYPE_SLOT_SAVE: {
                int id_slot = task.req.slots_task().id_slot();
                server_slot *slot = get_slot_by_id(id_slot);
                if (slot == nullptr) {
                    send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                    break;
                }
                if (slot->is_processing()) {
                    // if requested slot is unavailable, we defer this task for processing later
                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                    queue_tasks.defer(task);
                    break;
                }

                const size_t token_count = slot->cache_tokens.size();
                const int64_t t_start = ggml_time_us();

                std::string filename = task.req.slots_task().filename();
                std::string filepath = task.req.slots_task().filepath();

                // persist the slot's KV-cache sequence together with its tokens
                const size_t nwrite = llama_state_seq_save_file(km_model.ctx, filepath.c_str(), slot->id,
                                                                slot->cache_tokens.data(), token_count);

                const int64_t t_end = ggml_time_us();
                const double t_save_ms = (t_end - t_start) / 1000.0;

                TaskResult result;
                result.id = task.id;
                result.stop = true;
                result.error = false;
                result.slots_task.set_id_slot(id_slot);
                result.slots_task.set_n_saved(token_count);
                result.slots_task.set_filename(filename);
                result.slots_task.set_n_written(nwrite);
                result.slots_task.set_t_save_ms(t_save_ms);
                queue_results.send(result);
            }
                break;
            case SERVER_TASK_TYPE_SLOT_RESTORE: {
                int id_slot = task.req.slots_task().id_slot();
                server_slot *slot = get_slot_by_id(id_slot);
                if (slot == nullptr) {
                    send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                    break;
                }
                if (slot->is_processing()) {
                    // if requested slot is unavailable, we defer this task for processing later
                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                    queue_tasks.defer(task);
                    break;
                }

                const int64_t t_start = ggml_time_us();

                std::string filename = task.req.slots_task().filename();
                std::string filepath = task.req.slots_task().filepath();

                // size the token buffer to the slot's full context, then shrink
                // to the actual restored count below
                slot->cache_tokens.resize(slot->n_ctx);
                size_t token_count = 0;
                size_t nread = llama_state_seq_load_file(km_model.ctx, filepath.c_str(), slot->id, slot->cache_tokens.data(),
                                                         slot->cache_tokens.size(), &token_count);
                if (nread == 0) {
                    slot->cache_tokens.resize(0);
                    send_error(task,
                               "Unable to restore slot, no available space in KV cache or invalid slot save file",
                               ERROR_TYPE_INVALID_REQUEST);
                    break;
                }
                slot->cache_tokens.resize(token_count);

                const int64_t t_end = ggml_time_us();
                const double t_restore_ms = (t_end - t_start) / 1000.0;

                TaskResult result;
                result.id = task.id;
                result.stop = true;
                result.error = false;
                result.slots_task.set_id_slot(id_slot);
                result.slots_task.set_n_restored(token_count);
                result.slots_task.set_filename(filename);
                result.slots_task.set_n_read(nread);
                result.slots_task.set_t_restore_ms(t_restore_ms);
                queue_results.send(result);
            }
                break;
            case SERVER_TASK_TYPE_SLOT_ERASE: {
                int id_slot = task.req.slots_task().id_slot();
                server_slot *slot = get_slot_by_id(id_slot);
                if (slot == nullptr) {
                    send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST);
                    break;
                }
                if (slot->is_processing()) {
                    // if requested slot is unavailable, we defer this task for processing later
                    SRV_DBG("requested slot is unavailable, defer task, id_task = %d\n", task.id);
                    queue_tasks.defer(task);
                    break;
                }

                // Erase token cache: drop the slot's whole KV sequence
                const size_t n_erased = slot->cache_tokens.size();
                llama_kv_cache_seq_rm(km_model.ctx, slot->id, -1, -1);
                slot->cache_tokens.clear();

                TaskResult result;
                result.id = task.id;
                result.stop = true;
                result.error = false;
                result.slots_task.set_id_slot(id_slot);
                result.slots_task.set_n_erased(n_erased);
                queue_results.send(result);
            }
                break;
            case SERVER_TASK_TYPE_SET_LORA: {
                // re-apply the currently configured LoRA adapters to the context
                common_lora_adapters_apply(km_model.ctx, km_model.loras);
                TaskResult result;
                result.id = task.id;
                result.stop = true;
                result.error = false;
                queue_results.send(result);
            }
                break;
        }
    }

    // Main per-iteration scheduler. One call:
    //   1. bails out early if every slot is idle (optionally clearing the KV cache),
    //   2. applies context-shift to generating slots that ran out of context,
    //   3. builds a batch from already-sampled tokens plus pending prompt tokens,
    //   4. decodes the batch in chunks, halving n_batch on KV-cache pressure,
    //   5. per slot: emits embedding/rerank results or samples the next token.
    void KMContext::update_slots() {
        // check if all slots are idle
        {
            bool all_idle = true;

            for (auto &slot: slots) {
                if (slot.is_processing()) {
                    all_idle = false;
                    break;
                }
            }

            if (all_idle) {
                SRV_INF("%s", "all slots are idle\n");
                // nothing running — safe moment to drop the whole KV cache if requested
                if (km_model.clean_kv_cache) {
                    kv_cache_clear();
                }

                return;
            }
        }

        {
            SRV_DBG("%s", "posting NEXT_RESPONSE\n");

            // keep the task loop alive: schedule the next update_slots iteration
            server_task task;
            task.type = SERVER_TASK_TYPE_NEXT_RESPONSE;
            task.id_target = -1;

            queue_tasks.post(task);
        }

        // apply context-shift if needed
        // TODO: simplify and improve
        for (server_slot &slot: slots) {
            if (slot.is_processing() && slot.n_past + 1 >= slot.n_ctx) {
                if (!params.ctx_shift) {
                    // this check is redundant (for good)
                    // we should never get here, because generation should have already stopped in process_token()
                    slot.release();
                    send_error(slot, "context shift is disabled", ERROR_TYPE_SERVER);
                    continue;
                }

                // Shift context: keep the first n_keep tokens, discard n_discard
                // tokens after them and slide the remainder down in the KV cache.
                const int n_keep = slot.params.n_keep() + km_model.add_bos_token;
                const int n_left = slot.n_past - n_keep;
                const int n_discard = slot.params.n_discard() ? slot.params.n_discard() : (n_left / 2);

                SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left,
                        n_discard);

                llama_kv_cache_seq_rm(km_model.ctx, slot.id, n_keep, n_keep + n_discard);
                llama_kv_cache_seq_add(km_model.ctx, slot.id, n_keep + n_discard, slot.n_past, -n_discard);

                if (slot.params.cache_prompt()) {
                    // mirror the KV-cache shift in the cached token list
                    for (size_t i = n_keep + n_discard; i < slot.cache_tokens.size(); i++) {
                        slot.cache_tokens[i - n_discard] = slot.cache_tokens[i];
                    }

                    slot.cache_tokens.resize(slot.cache_tokens.size() - n_discard);
                }

                slot.n_past -= n_discard;

                slot.truncated = true;
            }
        }

        // start populating the batch for this iteration
        common_batch_clear(batch);

        // first, add sampled tokens from any ongoing sequences
        for (auto &slot: slots) {
            if (slot.state != SLOT_STATE_GENERATING) {
                continue;
            }

            // remember where this slot's logits will land inside the batch
            slot.i_batch = batch.n_tokens;

            common_batch_add(batch, slot.sampled, slot.n_past, {slot.id}, true);

            slot.n_past += 1;

            if (slot.params.cache_prompt()) {
                slot.cache_tokens.push_back(slot.sampled);
            }

            SLT_DBG(slot, "slot decode token, n_ctx = %d, n_past = %d, n_cache_tokens = %d, truncated = %d\n",
                    slot.n_ctx, slot.n_past, (int) slot.cache_tokens.size(), slot.truncated);
        }

        // process in chunks of params.n_batch
        int32_t n_batch = llama_n_batch(km_model.ctx);
        int32_t n_ubatch = llama_n_ubatch(km_model.ctx);

        // track if this is an embedding or non-embedding batch
        // if we've added sampled tokens above, we are in non-embedding mode
        // -1: none, 0: non-embedding, 1: embedding
        // TODO: make enum
        int32_t batch_type = batch.n_tokens > 0 ? 0 : -1;

        // next, batch any pending prompts without exceeding n_batch
        if (params.cont_batching || batch.n_tokens == 0) {
            for (auto &slot: slots) {
                // this slot still has a prompt to be processed
                if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) {
                    auto &prompt_tokens = slot.prompt_tokens;

                    // TODO: maybe move branch to outside of this loop in the future
                    if (slot.state == SLOT_STATE_STARTED) {
                        // first time we see this prompt: initialize counters/timers
                        slot.t_start_process_prompt = ggml_time_us();
                        slot.t_start_generation = 0;

                        slot.n_past = 0;
                        slot.n_prompt_tokens = prompt_tokens.size();
                        slot.state = SLOT_STATE_PROCESSING_PROMPT;

                        SLT_INF(slot, "new prompt, n_ctx_slot = %d, n_keep = %d, n_prompt_tokens = %d\n",
                                slot.n_ctx, slot.params.n_keep(), slot.n_prompt_tokens);

                        // print prompt tokens (for debugging)
                        if (1) {
                            // first 16 tokens (avoid flooding logs)
                            for (int i = 0; i < std::min<int>(16, prompt_tokens.size()); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i],
                                        token_to_piece(prompt_tokens[i]).c_str());
                            }
                        } else {
                            // all
                            for (int i = 0; i < (int) prompt_tokens.size(); i++) {
                                SLT_DBG(slot, "prompt token %3d: %6d '%s'\n", i, prompt_tokens[i],
                                        token_to_piece(prompt_tokens[i]).c_str());
                            }
                        }

                        // empty prompt passed -> release the slot and send empty response
                        if (prompt_tokens.empty()) {
                            SLT_WRN(slot, "%s", "empty prompt - releasing slot\n");

                            slot.release();
                            slot.print_timings();
                            send_final_response(slot);
                            continue;
                        }

                        if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING ||
                            slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                            // non-causal tasks cannot be split: the prompt must fit in one ubatch
                            if (slot.n_prompt_tokens > n_ubatch) {
                                slot.release();
                                send_error(slot, "input is too large to process. increase the physical batch size",
                                           ERROR_TYPE_SERVER);
                                continue;
                            }

                            if (slot.n_prompt_tokens > slot.n_ctx) {
                                slot.release();
                                send_error(slot, "input is larger than the max context size. skipping",
                                           ERROR_TYPE_SERVER);
                                continue;
                            }
                        } else {
                            if (!params.ctx_shift) {
                                // if context shift is disabled, we make sure prompt size is smaller than KV size
                                // TODO: there should be a separate parameter that control prompt truncation
                                //       context shift should be applied only during the generation phase
                                if (slot.n_prompt_tokens >= slot.n_ctx) {
                                    slot.release();
                                    send_error(slot,
                                               "the request exceeds the available context size. try increasing the context size or enable context shift",
                                               ERROR_TYPE_INVALID_REQUEST);
                                    continue;
                                }
                            }
                            // n_keep < 0 means "keep the whole prompt"; always cap
                            // it so at least 4 context positions remain free
                            if (slot.params.n_keep() < 0) {
                                slot.params.set_n_keep(slot.n_prompt_tokens);
                            }
                            slot.params.set_n_keep(std::min(slot.n_ctx - 4, slot.params.n_keep()));

                            // if input prompt is too big, truncate it: keep the
                            // first n_keep tokens plus the most recent tail
                            if (slot.n_prompt_tokens >= slot.n_ctx) {
                                const int n_left = slot.n_ctx - slot.params.n_keep();

                                const int n_block_size = n_left / 2;
                                const int erased_blocks =
                                        (slot.n_prompt_tokens - slot.params.n_keep() - n_block_size) / n_block_size;

                                std::vector<llama_token> new_tokens(
                                        prompt_tokens.begin(),
                                        prompt_tokens.begin() + slot.params.n_keep());

                                new_tokens.insert(
                                        new_tokens.end(),
                                        prompt_tokens.begin() + slot.params.n_keep() + erased_blocks * n_block_size,
                                        prompt_tokens.end());

                                prompt_tokens = std::move(new_tokens);

                                slot.truncated = true;
                                slot.n_prompt_tokens = prompt_tokens.size();

                                SLT_WRN(slot,
                                        "input truncated, n_ctx = %d, n_keep = %d, n_left = %d, n_prompt_tokens = %d\n",
                                        slot.n_ctx, slot.params.n_keep(), n_left, slot.n_prompt_tokens);

                                GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx);
                            }

                            if (slot.params.cache_prompt()) {
                                // reuse any previously computed tokens that are common with the new prompt
                                slot.n_past = longest_common_prefix(slot.cache_tokens, prompt_tokens);

                                // reuse chunks from the cached prompt by shifting their KV cache in the new position
                                if (params.n_cache_reuse > 0) {
                                    size_t head_c = slot.n_past; // cache
                                    size_t head_p = slot.n_past; // current prompt

                                    SLT_DBG(slot, "trying to reuse chunks with size > %d, slot.n_past = %d\n",
                                            params.n_cache_reuse, slot.n_past);

                                    // scan both sequences for matching runs of at
                                    // least n_cache_reuse tokens past the common prefix
                                    while (head_c < slot.cache_tokens.size() &&
                                           head_p < prompt_tokens.size()) {

                                        size_t n_match = 0;
                                        while (head_c + n_match < slot.cache_tokens.size() &&
                                               head_p + n_match < prompt_tokens.size() &&
                                               slot.cache_tokens[head_c + n_match] ==
                                               prompt_tokens[head_p + n_match]) {

                                            n_match++;
                                        }

                                        if (n_match >= (size_t) params.n_cache_reuse) {
                                            SLT_INF(slot,
                                                    "reusing chunk with size %zu, shifting KV cache [%zu, %zu) -> [%zu, %zu)\n",
                                                    n_match, head_c, head_c + n_match, head_p, head_p + n_match);
                                            //for (size_t i = head_p; i < head_p + n_match; i++) {
                                            //    SLT_DBG(slot, "cache token %3zu: %6d '%s'\n", i, prompt_tokens[i], token_to_piece(prompt_tokens[i]).c_str());
                                            //}

                                            // move the matched KV-cache cells to their new positions
                                            const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c;

                                            llama_kv_cache_seq_rm(km_model.ctx, slot.id, head_p, head_c);
                                            llama_kv_cache_seq_add(km_model.ctx, slot.id, head_c, -1, kv_shift);

                                            for (size_t i = 0; i < n_match; i++) {
                                                slot.cache_tokens[head_p + i] = slot.cache_tokens[head_c + i];
                                                slot.n_past++;
                                            }

                                            head_c += n_match;
                                            head_p += n_match;
                                        } else {
                                            head_c += 1;
                                        }
                                    }

                                    SLT_DBG(slot, "after context reuse, new slot.n_past = %d\n", slot.n_past);
                                }
                            }
                        }

                        if (slot.n_past == slot.n_prompt_tokens && slot.n_past > 0) {
                            // we have to evaluate at least 1 token to generate logits.
                            SLT_WRN(slot,
                                    "need to evaluate at least 1 token to generate logits, n_past = %d, n_prompt_tokens = %d\n",
                                    slot.n_past, slot.n_prompt_tokens);

                            slot.n_past--;
                        }

                        slot.n_prompt_tokens_processed = 0;
                    }

                    // non-causal tasks require to fit the entire prompt in the physical batch
                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING ||
                        slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                        // cannot fit the prompt in the current batch - will try next iter
                        if (batch.n_tokens + slot.n_prompt_tokens > n_batch) {
                            continue;
                        }
                    }

                    // check that we are in the right batch_type, if not defer the slot
                    const bool slot_type =
                            slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING ||
                            slot.inf_type == SERVER_TASK_INF_TYPE_RERANK ? 1 : 0;

                    if (batch_type == -1) {
                        batch_type = slot_type;
                    } else if (batch_type != slot_type) {
                        continue;
                    }

                    // keep only the common part
                    if (!llama_kv_cache_seq_rm(km_model.ctx, slot.id, slot.n_past, -1)) {
                        // could not partially delete (likely using a non-Transformer model)
                        llama_kv_cache_seq_rm(km_model.ctx, slot.id, -1, -1);

                        // there is no common part left
                        slot.n_past = 0;
                    }

                    SLT_INF(slot, "kv cache rm [%d, end)\n", slot.n_past);

                    // remove the non-common part from the cache
                    slot.cache_tokens.resize(slot.n_past);

                    // add prompt tokens for processing in the current batch
                    while (slot.n_past < slot.n_prompt_tokens && batch.n_tokens < n_batch) {
                        common_batch_add(batch, prompt_tokens[slot.n_past], slot.n_past, {slot.id}, false);

                        if (slot.params.cache_prompt()) {
                            slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);
                        }

                        slot.n_prompt_tokens_processed++;
                        slot.n_past++;
                    }

                    SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n",
                            slot.n_past, batch.n_tokens,
                            (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);

                    // entire prompt has been processed
                    if (slot.n_past == slot.n_prompt_tokens) {
                        slot.state = SLOT_STATE_DONE_PROMPT;

                        GGML_ASSERT(batch.n_tokens > 0);

                        common_sampler_reset(slot.smpl);

                        // Process all prompt tokens through sampler system
                        for (int i = 0; i < slot.n_prompt_tokens; ++i) {
                            common_sampler_accept(slot.smpl, prompt_tokens[i], false);
                        }

                        // extract the logits only for the last token
                        batch.logits[batch.n_tokens - 1] = true;

                        slot.n_decoded = 0;
                        slot.i_batch = batch.n_tokens - 1;

                        SLT_INF(slot, "prompt done, n_past = %d, n_tokens = %d\n", slot.n_past, batch.n_tokens);
                    }
                }

                // batch is full — remaining slots get picked up next iteration
                if (batch.n_tokens >= n_batch) {
                    break;
                }
            }
        }

        if (batch.n_tokens == 0) {
            SRV_WRN("%s", "no tokens to decode\n");
            return;
        }

        SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens);

        // make sure we're in the right embedding mode
        llama_set_embeddings(km_model.ctx, batch_type == 1);

        // process the created batch of tokens
        for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
            const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);

            // non-owning view over the [i, i + n_tokens) chunk of the batch
            llama_batch batch_view = {
                    n_tokens,
                    batch.token + i,
                    nullptr,
                    batch.pos + i,
                    batch.n_seq_id + i,
                    batch.seq_id + i,
                    batch.logits + i,
            };

            const int ret = llama_decode(km_model.ctx, batch_view);
            metrics.on_decoded(slots);

            if (ret != 0) {
                if (n_batch == 1 || ret < 0) {
                    // if you get here, it means the KV cache is full - try increasing it via the context size
                    SRV_ERR("failed to decode the batch: KV cache is full - try increasing it via the context size, i = %d, n_batch = %d, ret = %d\n",
                            i, n_batch, ret);
                    for (auto &slot: slots) {
                        slot.release();
                        send_error(slot,
                                   "Input prompt is too big compared to KV size. Please try increasing KV size.");
                    }
                    break; // break loop of n_batch
                }

                // retry with half the batch size to try to find a free slot in the KV cache
                n_batch /= 2;
                i -= n_batch;

                SRV_WRN("failed to find free space in the KV cache, retrying with smaller batch size - try increasing it via the context size or enable defragmentation, i = %d, n_batch = %d, ret = %d\n",
                        i, n_batch, ret);

                continue; // continue loop of n_batch
            }

            for (auto &slot: slots) {
                // only handle slots whose logits live in this decoded chunk
                if (slot.i_batch < (int) i || slot.i_batch >= (int) (i + n_tokens)) {
                    continue; // continue loop of slots
                }

                if (slot.state == SLOT_STATE_DONE_PROMPT) {
                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING) {
                        // prompt evaluated for embedding
                        send_embedding(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    if (slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
                        send_rerank(slot, batch_view);
                        slot.release();
                        slot.i_batch = -1;
                        continue; // continue loop of slots
                    }

                    // prompt evaluated for next-token prediction
                    slot.state = SLOT_STATE_GENERATING;
                } else if (slot.state != SLOT_STATE_GENERATING) {
                    continue; // continue loop of slots
                }

                completion_token_output result;
                const llama_token id = common_sampler_sample(slot.smpl, km_model.ctx, slot.i_batch - i);

                common_sampler_accept(slot.smpl, id, true);

                slot.n_decoded += 1;
                if (slot.n_decoded == 1) {
                    // first generated token: close out prompt-processing timing
                    slot.t_start_generation = ggml_time_us();
                    slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
                    metrics.on_prompt_eval(slot);
                }

                result.tok = id;

                // attach top-n candidate probabilities to the sampled token
                const auto *cur_p = common_sampler_get_candidates(slot.smpl);

                for (size_t i = 0; i < (size_t) slot.sparams.n_probs; ++i) {
                    result.probs.push_back({
                                                   cur_p->data[i].id,
                                                   i >= cur_p->size ? 0.0f : cur_p->data[i].p,
                                           });
                }

                if (!process_token(result, slot)) {
                    // release slot because of stop condition
                    slot.release();
                    slot.print_timings();
                    send_final_response(slot);
                    metrics.on_prediction(slot);
                }

                slot.i_batch = -1;
            }
        }

        SRV_DBG("%s", "run slots completed\n");
    }

    void KMContext::send_final_response(const server_slot & slot) {
        TaskResult res;
        res.id       = slot.id_task;
        res.error    = false;
        res.stop     = true;
        !slot.params.stream() ? res.completions.set_content(slot.generated_text) : res.completions.set_content("");
        res.completions.set_stop(true);
        res.completions.set_model(params.model_alias);
        auto ss = trans_proto(slot);
        ss.set_prompt(detokenize(slot.prompt_tokens));
        *res.completions.mutable_slot() =ss;

        if (slot.sparams.n_probs > 0) {
            std::vector<completion_token_output> probs;
            if (!slot.params.stream() && slot.stopped_word) {
                const std::vector<llama_token> stop_word_toks = km_model.tokenize(slot.stopping_word, false);

                size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
                probs = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end() - safe_offset);
            } else {
                probs = std::vector<completion_token_output>(
                        slot.generated_token_probs.begin(),
                        slot.generated_token_probs.end());
            }
            auto mcp = res.completions.mutable_completion_probabilities();
            for(auto &it : probs) {
                auto ptr = mcp->Add();
                ptr->set_text_to_send(it.text_to_send);
                ptr->set_tok(it.tok);
                for(auto &sit : it.probs) {
                    auto sptr = ptr->mutable_probs()->Add();
                    sptr->set_tok(sit.tok);
                    sptr->set_prob(sit.prob);
                }
            }
        }
        queue_results.send(res);
    }

    ModelMeta KMContext::get_meta() const {
        // Collect basic model metadata (size, vocab type/size, training
        // context, embedding width, parameter count) into a ModelMeta proto.
        ModelMeta meta;
        const auto *mdl = km_model.model;
        meta.set_size(llama_model_size(mdl));
        meta.set_vocab_type(static_cast<LlamaVocabType>(llama_vocab_type(mdl)));
        meta.set_n_vocab(llama_n_vocab(mdl));
        meta.set_n_ctx_train(llama_n_ctx_train(mdl));
        meta.set_n_embd(llama_n_embd(mdl));
        meta.set_n_params(llama_model_n_params(mdl));
        return meta;
    }

    std::vector<std::vector<llama_token>> KMContext::tokenize_input_prompts(const KaiPrompts &prompt, bool add_special, bool parse_special,  turbo::Status &status) {
        std::vector<std::vector<llama_token>> result;
        std::vector<PromptsValue> prompts;
        if(!flatten_to_mix_lists(prompt, prompts)) {
            status = turbo::invalid_argument_error("bad data format with to more level sub list");
            return result;
        }
        for(auto &it : prompts) {
            result.push_back(tokenize_mixed(it, add_special, parse_special));
        }
        return result;
    }

    std::vector<llama_token>
    KMContext::tokenize_mixed(const PromptsValue&value, bool add_special, bool parse_special) const {
        std::vector<llama_token> prompt_tokens;

        if (value.has_list_value()) {
            bool first = true;
            for (const auto &it : value.list_value().values()) {
                if (it.has_string_value()) {
                    auto &s = it.string_value();

                    std::vector<llama_token> p;
                    if (first) {
                        p =km_model.tokenize(s, add_special, parse_special);
                        first = false;
                    } else {
                        p = km_model.tokenize(s, false, parse_special);
                    }

                    prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
                } else {
                    if (first) {
                        first = false;
                    }

                    prompt_tokens.push_back(it.number_value());
                }
            }
        } else if(value.has_string_value()){
            auto &s = value.string_value();
            prompt_tokens = km_model.tokenize(s, add_special, parse_special);
        }

        return prompt_tokens;
    }

    std::vector<llama_token> KMContext::tokenize(const std::string &text, bool add_special, bool parse_special) {
        // Upper bound on token count: one token per byte, plus BOS/EOS when
        // add_special is set.
        const int32_t upper_bound = (int32_t) text.length() + 2 * add_special;
        std::vector<llama_token> result(upper_bound);

        const int32_t n = llama_tokenize(km_model.model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
        if (n < 0) {
            // A negative return means the buffer was too small and -n is the
            // required size; retry once with an exact-sized buffer.
            result.resize(-n);
            const int32_t check = llama_tokenize(km_model.model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
            GGML_ASSERT(check == -n);
        } else {
            result.resize(n);
        }
        return result;
    }

    std::string KMContext::token_to_piece(llama_token token, bool special) const {
        // Start with the string's SSO capacity (typically 15 bytes + '\0') so
        // the common short-piece case needs no heap allocation.
        std::string piece;
        piece.resize(piece.capacity());

        const int n = llama_token_to_piece(km_model.model, token, &piece[0], piece.size(), 0, special);
        if (n < 0) {
            // Buffer too small: -n is the required length, retry once.
            piece.resize(-n);
            const int check = llama_token_to_piece(km_model.model, token, &piece[0], piece.size(), 0, special);
            GGML_ASSERT(check == -n);
        } else {
            piece.resize(n);
        }

        return piece;
    }

    std::string KMContext::detokenize(const std::vector<llama_token> &tokens, bool special) const {
        // First attempt uses at least one byte per token (or whatever the
        // string already has capacity for); a negative return means the buffer
        // was too small and -n carries the required size.
        std::string text;
        text.resize(std::max(text.capacity(), tokens.size()));

        int32_t n = llama_detokenize(km_model.model, tokens.data(), (int32_t) tokens.size(), &text[0], (int32_t) text.size(), false, special);
        if (n < 0) {
            text.resize(-n);
            n = llama_detokenize(km_model.model, tokens.data(), (int32_t) tokens.size(), &text[0], (int32_t) text.size(), false, special);
            // whitespace trimming is performed after per-token detokenization
            GGML_ASSERT(n <= (int32_t) text.size());
        }
        text.resize(n);

        // NOTE: the original tokenizer decodes bytes after collecting the pieces.
        return text;
    }

    std::string KMContext::tokens_to_output_formatted_string(const llama_token token) const {
        // -1 is the sentinel for "no token": render it as an empty string.
        if (token == -1) {
            return "";
        }

        std::string out = token_to_piece(token);

        // A single byte with the high bit set is a partial UTF-8 character
        // (complete multi-byte tokens decode to more than one byte); render it
        // as a hex escape instead of raw bytes.
        if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
            std::stringstream ss;
            ss << std::hex << (out[0] & 0xff);
            out = "byte: \\x" + ss.str();
        }

        return out;
    }

    bool KMContext::chat_verify_template(const std::string & tmpl) {
        llama_chat_message chat[] = {{"user", "test"}};
        int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
        return res >= 0;
    }

    std::string KMContext::chat_apply_template(const std::string & tmpl,const std::vector<common_chat_msg> & msgs,bool add_ass) const {
        // Render a chat transcript using, in order of preference: the caller's
        // template, the model's built-in template (tmpl empty), or chatml as a
        // last-resort fallback when the built-in one is unsupported.
        std::vector<llama_chat_message> chat;
        int alloc_size = 0;
        for (const auto & msg : msgs) {
            chat.push_back({msg.role.c_str(), msg.content.c_str()});
            // heuristic: templates add roughly 25% of markup on top of the text
            alloc_size += (msg.role.size() + msg.content.size()) * 1.25;
        }

        const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str();
        std::vector<char> buf(alloc_size);
        bool fallback = false; // set when we had to fall back to default chatml

        // first pass: may succeed outright, or report the required buffer size
        int32_t res = llama_chat_apply_template(km_model.model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size());

        if (res < 0) {
            // error: chat template is not supported
            if (ptr_tmpl != nullptr) {
                // A custom "tmpl" that fails here is rejected outright; this is
                // deliberately redundant with chat_verify_template(), since we
                // cannot be sure the caller validated the template beforehand.
                throw std::runtime_error("this custom template is not supported");
            }
            // the built-in template is not supported: default to chatml
            res = llama_chat_apply_template(nullptr, "chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size());
            fallback = true;
        }

        // if our heuristic buffer turned out too small, resize and re-render
        if ((size_t) res > buf.size()) {
            buf.resize(res);
            res = llama_chat_apply_template(
                    fallback ? nullptr : km_model.model,
                    fallback ? "chatml" : ptr_tmpl,
                    chat.data(), chat.size(), add_ass, buf.data(), buf.size());
        }

        return std::string(buf.data(), res);
    }

    std::string KMContext::chat_format_single(const std::string & tmpl,
                                          const std::vector<common_chat_msg> & past_msg,
                                          const common_chat_msg & new_msg,
                                          bool add_ass) const {
        std::ostringstream ss;
        auto fmt_past_msg = past_msg.empty() ? "" : chat_apply_template(tmpl, past_msg, false);
        std::vector<common_chat_msg> chat_new(past_msg);
        // if the past_msg ends with a newline, we must preserve it in the formatted version
        if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
            ss << "\n";
        };
        // format chat with new_msg
        chat_new.push_back(new_msg);
        auto fmt_new_msg = chat_apply_template(tmpl, chat_new, add_ass);
        // get the diff part
        ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
        return ss.str();
    }

    std::string KMContext::chat_format_example(const std::string & tmpl) const {
        std::vector<common_chat_msg> msgs = {
                {"system",    "You are a helpful assistant"},
                {"user",      "Hello"},
                {"assistant", "Hi there"},
                {"user",      "How are you?"},
        };
        return chat_apply_template(tmpl, msgs, true);
    }

    void KMContext::string_process_escapes(std::string & input) {
        // Decode C-style escape sequences (\n, \r, \t, \', \", \\, \xHH) in
        // place; unrecognized sequences are kept verbatim. The string is
        // compacted while scanning and truncated at the end.
        const std::size_t len = input.length();
        std::size_t out = 0;

        for (std::size_t in = 0; in < len; ++in) {
            // plain character, or a trailing lone backslash: copy verbatim
            if (input[in] != '\\' || in + 1 >= len) {
                input[out++] = input[in];
                continue;
            }

            switch (input[++in]) {
                case 'n':  input[out++] = '\n'; break;
                case 'r':  input[out++] = '\r'; break;
                case 't':  input[out++] = '\t'; break;
                case '\'': input[out++] = '\''; break;
                case '\"': input[out++] = '\"'; break;
                case '\\': input[out++] = '\\'; break;
                case 'x':
                    // Handle \x12, etc: requires exactly two hex digits.
                    if (in + 2 < len) {
                        const char hex[3] = { input[in + 1], input[in + 2], 0 };
                        char *end = nullptr;
                        const long val = std::strtol(hex, &end, 16);
                        if (end == hex + 2) {
                            in += 2;
                            input[out++] = char(val);
                            break;
                        }
                    }
                    // fall through: not a valid hex escape, emit literally
                default:
                    input[out++] = '\\';
                    input[out++] = input[in];
                    break;
            }
        }

        input.resize(out);
    }

    std::string KMContext::string_from(const struct llama_batch & batch) const {
        // Render a batch as a human-readable debug string, one line per token;
        // non-printable bytes are stripped from the detokenized pieces.
        std::stringstream out;
        out << "[ ";

        for (int i = 0; i < batch.n_tokens; ++i) {
            if (i > 0) {
                out << ", ";
            }

            std::string piece = token_to_piece(batch.token[i]);
            piece.erase(
                    std::remove_if(piece.begin(), piece.end(),
                                   [](const unsigned char c) { return !std::isprint(c); }),
                    piece.end());

            out << "\n" << std::to_string(i)
                << ":token '" << piece << "'"
                << ":pos " << std::to_string(batch.pos[i])
                << ":n_seq_id  " << std::to_string(batch.n_seq_id[i])
                << ":seq_id " << std::to_string(batch.seq_id[i][0])
                << ":logits " << std::to_string(batch.logits[i]);
        }

        out << " ]";
        return out.str();
    }

    std::string KMContext::string_from(const std::vector<llama_token> & tokens) const {
        // Render a token list as comma-separated "'piece':id" pairs for debug
        // logging; non-printable bytes are stripped from the pieces.
        std::stringstream out;
        out << "[ ";

        bool need_sep = false;
        for (const auto & tok : tokens) {
            if (need_sep) {
                out << ", ";
            }
            need_sep = true;

            std::string piece = token_to_piece(tok);
            piece.erase(
                    std::remove_if(piece.begin(), piece.end(),
                                   [](const unsigned char c) { return !std::isprint(c); }),
                    piece.end());

            out << "'" << piece << "'"
                << ":" << std::to_string(tok);
        }

        out << " ]";
        return out.str();
    }
}  // namespace kllm
