// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <kllm/openai/oai_processor.h>
#include <nlohmann/json.hpp>
#include <kllm/kai/kai.h>
#include <kllm/openai/oai.h>
#include <turbo/strings/match.h>

#include "kllm/public/index.html.h"
#include "kllm/public/completion.js.h"
#include "kllm/public/loading.html.h"
#include "kllm/public/deps_daisyui.min.css.h"
#include "kllm/public/deps_markdown-it.js.h"
#include "kllm/public/deps_tailwindcss.js.h"
#include "kllm/public/deps_vue.esm-browser.js.h"
#include "kllm/public/favicon.png.h"
#include "kllm/public/kumo_logo.svg.h"


#define MIMETYPE_JSON "application/json; charset=utf-8"

namespace kllm {

    // Fetch the chat template stored in the GGUF metadata, or "" when the
    // model carries none.
    static std::string llama_get_chat_template(const struct llama_model * model) {
        static const char kTemplateKey[] = "tokenizer.chat_template";
        // First call with a NULL buffer: the API reports the required length.
        const int32_t len = llama_model_meta_val_str(model, kTemplateKey, NULL, 0);
        if (len < 0) {
            // Key not present in this model's metadata.
            return "";
        }
        std::vector<char> buf(len, 0);
        llama_model_meta_val_str(model, kTemplateKey, buf.data(), buf.size());
        return std::string(buf.data(), buf.size());
    }

    // Emit one server-sent event frame ("<event>: <payload>\n\n") on the
    // progressive sink. Returns true when the write succeeded (Write() == 0).
    static bool server_sent_event(krpc::ProgressiveAttachment *sink, const char *event, const nlohmann::ordered_json &data) {
        std::string frame(event);
        frame += ": ";
        frame += data.dump(-1, ' ', false, nlohmann::ordered_json::error_handler_t::replace);
        // The trailing blank line terminates the SSE event; clients will not
        // dispatch the message without it.
        frame += "\n\n";

        LOG_DBG("data stream, to_send: %s", frame.c_str());

        return sink->Write(frame.c_str(), frame.size()) == 0;
    }

    // Send a JSON error response wrapped in the OpenAI-style {"error": ...}
    // envelope. The HTTP status is taken from error_data["code"] (500 default).
    void res_error(krpc::RestfulResponse *response, const nlohmann::ordered_json &error_data) {
        const nlohmann::ordered_json envelope{{"error", error_data}};
        response->set_status_code(json_value(error_data, "code", 500));
        response->set_content_type(MIMETYPE_JSON);
        response->set_body(envelope.dump(-1, ' ', false, nlohmann::ordered_json::error_handler_t::replace));
    }

    // Send a turbo::Status as an OpenAI-style error envelope. Always answers
    // HTTP 500 — a Status reaching here signals an internal failure.
    void res_error(krpc::RestfulResponse *response, const turbo::Status &status) {
        const nlohmann::ordered_json envelope{{"error", format_aoi_error_response(status)}};
        response->set_status_code(500);
        response->set_content_type(MIMETYPE_JSON);
        response->set_body(envelope.dump(-1, ' ', false, nlohmann::ordered_json::error_handler_t::replace));
    }

    // Reject a request whose HTTP verb is not supported on this path.
    void res_method_not_allow(krpc::RestfulResponse *response) {
        auto data = format_aoi_error_response("method not allow on this path", ERROR_TYPE_PERMISSION);
        response->set_body(data.dump(-1, ' ', false, nlohmann::ordered_json::error_handler_t::replace));
        response->set_content_type(MIMETYPE_JSON);
        // bug fix: 405 Method Not Allowed. The original returned 502 (Bad
        // Gateway), which misreports an unsupported verb as an upstream failure.
        response->set_status_code(405);
    }
    // Send a 200 response carrying `data` as JSON. Invalid UTF-8 in the payload
    // is replaced rather than aborting serialization.
    static void res_ok(krpc::RestfulResponse *response, const nlohmann::ordered_json &data) {
        response->set_status_code(200);
        response->set_content_type(MIMETYPE_JSON);
        response->set_body(data.dump(-1, ' ', false, nlohmann::ordered_json::error_handler_t::replace));
    }


    // GET /health — constant liveness probe; the body never changes, so it is
    // serialized once and reused across requests.
    void HealthProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        static const std::string body = nlohmann::ordered_json{{"status", "ok"}}.dump();
        response->set_status_code(200);
        response->set_content_type(MIMETYPE_JSON);
        response->set_body(body);
    }

    // Exact-path route: /health does not match wildcard sub-paths.
    bool HealthProcessor::is_wildcards() const {
        return false;
    }

    // GET /metrics — collect server metrics and render them in the Prometheus
    // text exposition format (version 0.0.4).
    void MetricProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response)  {
        KaiRequest metrics_req;
        KaiResponse metrics_res;
        KaiMetrics metric(context);
        metric.metrics(metrics_req, metrics_res);
        if (metrics_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(metrics_res.status().errmsg(),
                                                          static_cast<ErrorType>(metrics_res.status().code())));
            return;
        }
        // Prometheus scrapers use this header to detect process restarts.
        response->set_header("Process-Start-Time-Unix", std::to_string(metrics_res.metrics().t_start()));
        response->set_content_type("text/plain; version=0.0.4");
        response->set_body(format_aoi_prometheus_metrics_response(metrics_res));
        response->set_status_code(200); // HTTP OK
    }

    // Exact-path route: /metrics does not match wildcard sub-paths.
    bool MetricProcessor::is_wildcards() const  {
        return false;
    }

    // /props dispatcher: GET reads the properties, POST updates them, every
    // other verb is rejected.
    void PropsProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        switch (request->method()) {
            case krpc::HTTP_METHOD_GET:
                get(request, response);
                break;
            case krpc::HTTP_METHOD_POST:
                post(request, response);
                break;
            default:
                res_method_not_allow(response);
                break;
        }
    }

    // GET /props — expose server-wide properties: generation defaults, the
    // number of parallel slots, and the model's built-in chat template.
    void PropsProcessor::get(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        nlohmann::ordered_json props;
        props["default_generation_settings"] = get_default_generation_settings_for_props(context);
        props["total_slots"] = context->params.n_parallel;
        props["chat_template"] = llama_get_chat_template(context->km_model.model);
        res_ok(response, props);
    }

    // POST /props — update global properties (only when the server was started
    // with `--props`). Currently parses/validates the body but applies nothing.
    void PropsProcessor::post(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        if (!context->params.endpoint_props) {
            res_error(response, format_aoi_error_response(
                    "This server does not support changing global properties. Start it with `--props`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // bug fix: use the non-throwing parse overload. The original let
        // nlohmann's parse_error exception escape the handler on malformed JSON.
        nlohmann::ordered_json data = nlohmann::ordered_json::parse(request->body().to_string(), nullptr, false);
        if (data.is_discarded()) {
            res_error(response, format_aoi_error_response("invalid JSON body", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        // update any props here

        res_ok(response, {{"success", true}});
    }

    // Exact-path route: /props does not match wildcard sub-paths.
    bool PropsProcessor::is_wildcards() const {
        return false;
    }

    // GET /models and /v1/models — list the models served by this instance.
    void ModelsProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        kllm::KaiResponse kumo_res;
        kllm::KaiRequest kumo_req;
        kumo_req.set_query_type(kllm::QUERY_MODEL_LIST);
        KaiModels model(context);
        model.list(kumo_req, kumo_res);
        std::string errmsg;
        int code = 0; // bug fix: was read uninitialized if the formatter skips it
        nlohmann::ordered_json json;
        format_aoi_model_list_response(kumo_res, json, errmsg, code);
        // NOTE(review): errmsg/code are filled by the formatter but never
        // checked — consider returning res_error on failure; confirm semantics.
        // bug fix: removed the redundant set_body(json.dump()) — res_ok()
        // immediately overwrote it with the same payload.
        res_ok(response, json);
    }

    // Exact-path route: the model-list endpoints do not match wildcard sub-paths.
    bool ModelsProcessor::is_wildcards() const {
        return false;
    }

    // /completion(s) and /v1/completions — plain (non-chat) text completion,
    // blocking or streamed as server-sent events depending on the request.
    void CompletionsProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {

        // Completions are unavailable when the server runs in embedding-only mode.
        if (context->params.embedding) {
            res_error(response, format_aoi_error_response(
                    "This server does not support completions. Start it without `--embeddings`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        KaiRequest km_req;
        auto rs = parse_oai_completions_request(request->body().to_string(), context, km_req);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs));
            // bug fix: the original fell through here and ran the completion
            // with a partially-populated request.
            return;
        }

        if (!km_req.slot_params().stream()) {
            // Blocking mode: run the completion and return all results at once.
            KaiResponse km_res;
            KaiCompletions completion(context);
            completion.completions(km_req, km_res);
            nlohmann::ordered_json arr = nlohmann::ordered_json::array();
            for (auto &it : km_res.completions()) {
                nlohmann::ordered_json data;
                format_aoi_completions_response(it, context, data);
                arr.push_back(data);
            }

            res_ok(response, arr);
        } else {
            // Streaming mode: push each partial result as a server-sent event.
            KaiResponse km_res;

            KaiCompletions completion(context);
            auto writer = response->get_chunk_streamer();
            completion.completions_stream(km_req, km_res, [&writer, this](const CompletionsResult &result) -> bool {
                nlohmann::ordered_json data;
                format_aoi_completions_response(result, this->context, data);
                // Continue while the write succeeds and generation hasn't stopped.
                return server_sent_event(writer, "data", data) && !result.stop();
            }, [&km_res, response]() {
                if (km_res.status().code() != 0) {
                    // Surface the stream-level failure to the client.
                    turbo::Status rs(static_cast<turbo::StatusCode>(km_res.status().code()),
                                     km_res.status().errmsg());
                    res_error(response, rs);
                }
            });
        }
    }

    // Exact-path route: the completion endpoints do not match wildcard sub-paths.
    bool CompletionsProcessor::is_wildcards() const  {
        return false;
    }

    // /chat/completions and /v1/chat/completions — OpenAI chat API, blocking or
    // SSE-streamed. `done` must run on every path: the non-stream path guards
    // it, the stream path releases it before writing chunks.
    void ChatCompletionsProcessor::process_with_done(const krpc::RestfulRequest *request, krpc::RestfulResponse *response, ::google::protobuf::Closure *done) {
        if (context->params.embedding) {
            // bug fix: run `done` on this early return (the original never ran it).
            krpc::ClosureGuard done_guard(done);
            res_error(response, format_aoi_error_response(
                    "This server does not support completions. Start it without `--embeddings`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // bug fix: non-throwing parse — malformed JSON previously threw a
        // nlohmann parse_error straight out of the handler.
        auto body_json = nlohmann::ordered_json::parse(request->body().to_string(), nullptr, false);
        if (body_json.is_discarded()) {
            krpc::ClosureGuard done_guard(done);
            res_error(response, format_aoi_error_response("invalid JSON body", ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        KaiRequest km_req;
        auto rs = parse_oai_chat_completions_request(body_json, context, km_req);
        if (!rs.ok()) {
            krpc::ClosureGuard done_guard(done);
            res_error(response, format_aoi_error_response(rs));
            // bug fix: the original fell through with a partially-populated
            // request and never ran `done` for this error.
            return;
        }

        if (!km_req.slot_params().stream()) {
            // Blocking mode: `done` runs when the guard leaves scope.
            krpc::ClosureGuard done_guard(done);
            KaiChatCompletions chat(context);
            KaiResponse km_response;
            chat.chat(km_req, km_response);
            if (km_response.status().code() != 0) {
                res_error(response, format_aoi_error_response(km_response));
                return;
            }
            nlohmann::ordered_json body;
            format_aoi_chat_completions_final_response(km_response.completions(0), context, body, false);
            res_ok(response, body);
        } else {
            KaiResponse km_res;
            KaiChatCompletions completion(context);
            auto writer = response->get_chunk_streamer();
            // Streaming mode: release `done` now; subsequent output goes
            // through the chunk streamer.
            done->Run();
            completion.chat_stream(km_req, km_res, [&](const CompletionsResult &result) -> bool {
                auto data = format_partial_response_oaicompat(result, context);
                if (data.empty()) {
                    return true; // nothing to emit for this chunk; keep streaming
                }
                // Continue while the write succeeds and generation hasn't stopped.
                // (removed leftover LOG(INFO) debug statements)
                return server_sent_event(writer, "data", data) && !result.stop();
            }, [&]() {
                if (km_res.status().code() != 0) {
                    turbo::Status rs(static_cast<turbo::StatusCode>(km_res.status().code()),
                                     km_res.status().errmsg());
                    res_error(response, rs);
                }
            });
            // OpenAI protocol terminator for SSE streams.
            static const std::string ev_done = "data: [DONE]\n\n";
            writer->Write(ev_done.data(), ev_done.size());
        }
    }

    // Exact-path route: the chat endpoints do not match wildcard sub-paths.
    bool ChatCompletionsProcessor::is_wildcards() const {
        return false;
    }

    // /infill — fill-in-the-middle completion. Requires a model that exposes
    // all three FIM special tokens; supports blocking and SSE-streamed output.
    void InfillProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        // Infill is unavailable in embedding-only mode.
        if (context->params.embedding) {
            res_error(response, format_aoi_error_response(
                    "This server does not support completions. Start it without `--embeddings`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // Model compatibility: accumulate a message for every missing FIM token.
        std::string missing;
        if (llama_token_fim_pre(context->km_model.model) == LLAMA_TOKEN_NULL) {
            missing += "prefix token is missing. ";
        }
        if (llama_token_fim_suf(context->km_model.model) == LLAMA_TOKEN_NULL) {
            missing += "suffix token is missing. ";
        }
        if (llama_token_fim_mid(context->km_model.model) == LLAMA_TOKEN_NULL) {
            missing += "middle token is missing. ";
        }
        if (!missing.empty()) {
            res_error(response,
                      format_aoi_error_response(turbo::str_format("Infill is not supported by this model: %s", missing.c_str()),
                                                ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        KaiRequest km_req;
        auto parse_status = parse_oai_infill_request(request->body().to_string(), context, km_req);
        if (!parse_status.ok()) {
            res_error(response, format_aoi_error_response(parse_status));
            return;
        }

        if (km_req.slot_params().stream()) {
            // Streaming mode: forward each partial completion as an SSE frame.
            KaiResponse km_res;
            KaiInfill infill(context);
            auto writer = response->get_chunk_streamer();
            infill.infill_stream(km_req, km_res, [&](const CompletionsResult &result) -> bool {
                nlohmann::ordered_json chunk;
                format_aoi_completions_response(result, context, chunk);
                return server_sent_event(writer, "data", chunk) && !result.stop();
            }, [&]() {
                if (km_res.status().code() != 0) {
                    // The stream has already started, so the failure is only logged.
                    turbo::Status rs(static_cast<turbo::StatusCode>(km_res.status().code()),
                                     km_res.status().errmsg());
                    LOG(INFO) << rs.to_string();
                }
            });
        } else {
            // Blocking mode: gather every completion into a JSON array.
            KaiResponse km_res;
            KaiInfill infill(context);
            infill.infill(km_req, km_res);
            nlohmann::ordered_json results = nlohmann::ordered_json::array();
            for (auto &it : km_res.completions()) {
                nlohmann::ordered_json entry;
                format_aoi_completions_response(it, context, entry);
                results.push_back(entry);
            }

            res_ok(response, results);
        }
    }

    // Exact-path route: /infill does not match wildcard sub-paths.
    bool InfillProcessor::is_wildcards() const {
        return false;
    }

    // /embedding(s) and /v1/embeddings — compute embeddings for the request input.
    void EmbeddingProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiEmbeddings embd(context);
        KaiRequest km_req;
        KaiResponse km_res;
        km_req.set_query_type(QUERY_EMBEDDING);
        auto rs = parse_oai_embedding_request(request->body().to_string(), context, km_req);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs));
            return;
        }
        embd.embedding(km_req, km_res);
        // write JSON response
        if (km_res.status().code() == 0) {
            nlohmann::ordered_json json;
            int err_code = 0; // initialized in case the formatter leaves it untouched
            std::string errmsg;
            format_aoi_embedding_response(km_res, json, errmsg, err_code);
            res_ok(response, json);
        } else {
            // bug fix: the original reported `rs`, which is necessarily OK at this
            // point; surface the actual failure recorded in km_res instead.
            res_error(response, format_aoi_error_response(km_res.status().errmsg(),
                                                          static_cast<ErrorType>(km_res.status().code())));
        }
    }

    // Exact-path route: the embedding endpoints do not match wildcard sub-paths.
    bool EmbeddingProcessor::is_wildcards() const {
        return false;
    }

    // /rerank(ing) and /v1/rerank(ing) — score documents against a query.
    // Requires `--reranking`; mutually exclusive with embedding-only mode.
    void RerankProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        if (!context->params.reranking || context->params.embedding) {
            res_error(response, format_aoi_error_response(
                    "This server does not support reranking. Start it with `--reranking` and without `--embedding`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        KaiRequest rank_req;
        KaiResponse rank_res;
        auto status = parse_oai_rerank_request(request->body().to_string(), context, rank_req);
        if (!status.ok()) {
            res_error(response, format_aoi_error_response(status));
            return;
        }

        KaiRerank ranker(context);
        ranker.rank(rank_req, rank_res);

        nlohmann::ordered_json body;
        status = format_aoi_rerank_response(rank_res, body);
        if (!status.ok()) {
            res_error(response, format_aoi_error_response(status));
            return;
        }
        res_ok(response, body);
    }

    // Exact-path route: the rerank endpoints do not match wildcard sub-paths.
    bool RerankProcessor::is_wildcards() const {
        return false;
    }

    // /tokenize — convert the request content into model tokens.
    void TokenizeProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiRequest tok_req;
        KaiResponse tok_res;
        // Parse the OAI-style tokenize payload into the internal request proto.
        if (auto rs = parse_oai_tokenize_request(request->body().to_string(), context, tok_req); !rs.ok()) {
            res_error(response, format_aoi_error_response(rs.to_string(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        tok_req.set_query_type(QUERY_TOKENIZE);
        KaiTokenize tokenize(context);
        tokenize.tokenize(tok_req, tok_res);
        if (tok_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(tok_res.status().errmsg(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        nlohmann::ordered_json body;
        if (auto rs = format_aoi_tokenize_response(tok_res, context, body); !rs.ok()) {
            res_error(response, format_aoi_error_response(rs.to_string(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        res_ok(response, body);
    }

    // Exact-path route: /tokenize does not match wildcard sub-paths.
    bool TokenizeProcessor::is_wildcards() const {
        return false;
    }

    // /detokenize — convert tokens back into text.
    void DetokenizeProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiRequest km_req;
        KaiResponse km_res;
        // bug fix: the original parsed response->body() — the outgoing body —
        // instead of the incoming request body.
        auto rs = parse_oai_detokenize_request(request->body().to_string(), context, km_req);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs.to_string(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        km_req.set_query_type(QUERY_DETOKENIZE);
        KaiTokenize tokenize(context);
        tokenize.detokenize(km_req, km_res);
        if (km_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(km_res.status().errmsg(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        nlohmann::ordered_json obj;
        rs = format_aoi_detokenize_response(km_res, context, obj);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs));
            return;
        }
        res_ok(response, obj);
    }

    // Exact-path route: /detokenize does not match wildcard sub-paths.
    bool DetokenizeProcessor::is_wildcards() const {
        return false;
    }

    // GET /lora-adapters — list the loaded LoRA adapters; no request payload.
    void LoraListProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiRequest lora_req;
        KaiResponse lora_res;
        KaiLora lora(context);
        lora.lora(lora_req, lora_res);
        if (lora_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(lora_res.status().errmsg(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        nlohmann::ordered_json body;
        format_aoi_lora_response(lora_res, body);
        res_ok(response, body);
    }

    // Exact-path route: /lora-adapters does not match wildcard sub-paths.
    bool LoraListProcessor::is_wildcards() const {
        return false;
    }

    // /lora-adapters-apply — hot-swap the set of active LoRA adapters.
    void LoraApplyProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiRequest apply_req;
        KaiResponse apply_res;
        if (auto rs = parse_oai_lora_request(request->body().to_string(), context, apply_req); !rs.ok()) {
            res_error(response, format_aoi_error_response(rs.to_string(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }

        KaiLora lora(context);
        lora.lora_apply(apply_req, apply_res);
        if (apply_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(apply_res.status().errmsg(), ERROR_TYPE_INVALID_REQUEST));
            return;
        }
        nlohmann::ordered_json body;
        format_aoi_lora_apply_response(apply_res, body);
        res_ok(response, body);
    }

    // Exact-path route: /lora-adapters-apply does not match wildcard sub-paths.
    bool LoraApplyProcessor::is_wildcards() const {
        return false;
    }

    // /slots dispatcher: a bare "/slots" lists slot state; any trailing path
    // segment ("/slots/<id>") is treated as a slot action.
    void SlotsProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        const auto remainder = request->unresolved_path();
        if (!remainder.empty()) {
            action(request, response);
        } else {
            get(request, response);
        }
    }

    // GET /slots — report per-slot state via the metrics subsystem.
    void SlotsProcessor::get(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        KaiResponse km_response;
        KaiRequest km_request;
        KaiMetrics metric(context);
        km_request.set_query_type(QUERY_METRICS);
        metric.slots(km_request, km_response);
        if (km_response.status().code() != 0) {
            // Propagate the backend failure; the proto status code is cast to ErrorType.
            res_error(response, format_aoi_error_response(km_response.status().errmsg(),
                                                     static_cast<ErrorType>(km_response.status().code())));
            return;
        }
        nlohmann::ordered_json obj;
        format_aoi_slot_response(km_response, obj);

        const int n_idle_slots = km_response.metrics().n_idle_slots();
        // NOTE(review): km_request is default-constructed and never populated
        // from the HTTP request, so fail_on_no_slot() always carries its proto
        // default here — this branch looks dead unless that default is true.
        // Confirm whether the flag should be read from the incoming request.
        if (km_request.fail_on_no_slot()) {
            if (n_idle_slots == 0) {
                res_error(response, format_aoi_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
                return;
            }
        }

        res_ok(response, obj);
    }
    // POST /slots/<id>?action=save|restore|erase — persist, load or clear a
    // slot's KV-cache state. Requires `--slot-save-path`.
    void SlotsProcessor::action(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        if (context->params.slot_save_path.empty()) {
            res_error(response, format_aoi_error_response(
                    "This server does not support slots action. Start it with `--slot-save-path`",
                    ERROR_TYPE_NOT_SUPPORTED));
            return;
        }

        // The trailing path segment carries the slot id, optionally ":"-prefixed.
        auto path_param = request->unresolved_path();
        if (turbo::starts_with(path_param, ":")) {
            path_param = path_param.substr(1);
        }
        int id_slot;
        if (!turbo::simple_atoi(path_param, &id_slot)) {
            res_error(response, format_aoi_error_response("Invalid slot ID", ERROR_TYPE_INVALID_REQUEST));
            // bug fix: the original fell through with an uninitialized id_slot.
            return;
        }

        KaiRequest km_req;
        KaiResponse km_res;
        auto action_query = request->uri().GetQuery("action");
        std::string action = action_query ? *action_query : "";
        // Map the action to a query type once (the original set it twice).
        if (action == "save") {
            km_req.set_query_type(QUERY_SLOTS_SAVE);
        } else if (action == "restore") {
            km_req.set_query_type(QUERY_SLOTS_RESTORE);
        } else if (action == "erase") {
            km_req.set_query_type(QUERY_SLOTS_ERASE);
        } else {
            res_error(response, format_aoi_error_response("Invalid action", ERROR_TYPE_INVALID_REQUEST));
            // bug fix: the original continued and executed with an unset query type.
            return;
        }
        km_req.mutable_slots_task()->set_id_slot(id_slot);
        auto rs = parse_oai_slots_request(request->body().to_string(), context, km_req);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs));
            return; // bug fix: missing return after the parse error
        }
        KaiSlots slots(context);
        if (action == "save") {
            slots.save(km_req, km_res);
        } else if (action == "restore") {
            // bug fix: the original called slots.save() for the restore action.
            slots.restore(km_req, km_res);
        } else {
            slots.erase(km_req, km_res);
        }
        if (km_res.status().code() != 0) {
            res_error(response, format_aoi_error_response(km_res));
            return; // bug fix: missing return after the backend error
        }
        nlohmann::ordered_json res_body;
        rs = format_aoi_slots_response(km_res, res_body);
        if (!rs.ok()) {
            res_error(response, format_aoi_error_response(rs));
            return; // bug fix: missing return after the formatting error
        }

        res_ok(response, res_body);
    }

    // Wildcard route: "/slots/<id>" sub-paths are dispatched to this processor.
    bool SlotsProcessor::is_wildcards() const {
        return true;
    }

    // GET / — serve the embedded web UI entry page (compiled-in index.html).
    void RootProcessor::process(const krpc::RestfulRequest *request, krpc::RestfulResponse *response) {
        response->set_status_code(200);
        response->set_content_type("text/html; charset=utf-8");
        response->set_body(reinterpret_cast<const char *>(index_html), index_html_len);
    }

    // Exact-path route: the root page does not match wildcard sub-paths.
    bool RootProcessor::is_wildcards() const {
        return false;
    }


    // Register every OpenAI-compatible REST route on the singleton service.
    // std::make_shared replaces the raw `new` + shared_ptr construction: a
    // single allocation and exception-safe ownership transfer.
    void setup_oai_api(KMContext *c) {
        auto ins = krpc::RestfulService::instance();
        ins->set_processor("/health", std::make_shared<HealthProcessor>()); // public endpoint (no API key check) done
        ins->set_processor("/metrics", std::make_shared<MetricProcessor>(c));  // done
        ins->set_processor("/props", std::make_shared<PropsProcessor>(c));  // done
        ins->set_processor("/models", std::make_shared<ModelsProcessor>(c)); // public endpoint (no API key check) done
        ins->set_processor("/v1/models", std::make_shared<ModelsProcessor>(c)); // public endpoint (no API key check) done
        ins->set_processor("/completion", std::make_shared<CompletionsProcessor>(c)); // legacy
        ins->set_processor("/completions", std::make_shared<CompletionsProcessor>(c)); // done
        ins->set_processor("/v1/completions", std::make_shared<CompletionsProcessor>(c));  // done
        ins->set_processor("/chat/completions", std::make_shared<ChatCompletionsProcessor>(c));  // done
        ins->set_processor("/v1/chat/completions", std::make_shared<ChatCompletionsProcessor>(c));  // done
        ins->set_processor("/infill", std::make_shared<InfillProcessor>(c));  // done
        ins->set_processor("/embedding", std::make_shared<EmbeddingProcessor>(c)); // legacy done
        ins->set_processor("/embeddings", std::make_shared<EmbeddingProcessor>(c));  // done
        ins->set_processor("/v1/embeddings", std::make_shared<EmbeddingProcessor>(c));  // done
        ins->set_processor("/rerank", std::make_shared<RerankProcessor>(c));  // done
        ins->set_processor("/reranking", std::make_shared<RerankProcessor>(c));  // done
        ins->set_processor("/v1/rerank", std::make_shared<RerankProcessor>(c));  // done
        ins->set_processor("/v1/reranking", std::make_shared<RerankProcessor>(c));  // done
        ins->set_processor("/tokenize", std::make_shared<TokenizeProcessor>(c));  // done
        ins->set_processor("/detokenize", std::make_shared<DetokenizeProcessor>(c));  // done
        // LoRA adapters hotswap
        ins->set_processor("/lora-adapters", std::make_shared<LoraListProcessor>(c)); // done
        ins->set_processor("/lora-adapters-apply", std::make_shared<LoraApplyProcessor>(c));   // done
        // Save & load slots
        ins->set_processor("/slots", std::make_shared<SlotsProcessor>(c)); // done
    }

    // Register routes for the web UI: either serve files from the directory
    // given by `--path` (params.public_path), or fall back to the static assets
    // compiled into the binary.
    void setup_oai_ui(KMContext *c) {
        // Helper to register one embedded asset under a fixed path + MIME type.
        auto handle_static_content = [](const std::string &path, const unsigned char *content, size_t len, const char *mime_type) {
            std::string body(reinterpret_cast<const char *>(content), len);
            krpc::RestfulService::instance()->set_static_content_processor(path, body, mime_type);
        };
        const auto &public_path = c->params.public_path; // avoid copying the string
        auto ins = krpc::RestfulService::instance();
        if (!public_path.empty()) {
            // Serve static files from the user-supplied directory.
            auto sptr = std::make_shared<krpc::StaticFileProcessor>(public_path);
            sptr->set_mime_type(krpc::RestfulService::get_default_mime_type());
            if (public_path == "/") {
                ins->set_root_processor(sptr);
            } else {
                ins->set_processor(public_path, sptr);
            }
        } else {
            // Use the embedded assets. (Removed the inner `ins` local that
            // shadowed the one captured above.)
            ins->set_root_processor(std::make_shared<RootProcessor>());
            handle_static_content("/index.html", index_html, index_html_len, "text/html; charset=utf-8");
            handle_static_content("/completion.js", completion_js, completion_js_len, "text/javascript; charset=utf-8");
            handle_static_content("/deps_daisyui.min.css", deps_daisyui_min_css, deps_daisyui_min_css_len, "text/css; charset=utf-8");
            handle_static_content("/deps_markdown-it.js", deps_markdown_it_js, deps_markdown_it_js_len, "text/javascript; charset=utf-8");
            handle_static_content("/deps_tailwindcss.js", deps_tailwindcss_js, deps_tailwindcss_js_len, "text/javascript; charset=utf-8");
            handle_static_content("/deps_vue.esm-browser.js", deps_vue_esm_browser_js, deps_vue_esm_browser_js_len,
                                                                    "text/javascript; charset=utf-8");
            handle_static_content("/favicon.png", favicon_png, favicon_png_len, "image/png");
            handle_static_content("/kumo_logo.svg", kumo_logo_svg, kumo_logo_svg_len, "image/svg+xml");
        }
    }
}  // namespace kllm

