// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
//

#include <kllm/openai/request.h>
#include <kllm/utility/json.h>
#include <kllm/utility/json_schema_to_grammar.h>
#include <kllm/core/task_slot.h>

namespace kllm {
    namespace internal {
        // Render a list of OAI-style chat messages through the model's chat
        // template. Each message must carry a "content" field that is either a
        // plain string or an array of parts whose "text" fields are joined with
        // leading newlines. Returns the fully formatted prompt string.
        turbo::Result<std::string>
        format_chat(const KMContext *ctx, const std::vector<nlohmann::ordered_json> &messages) {
            std::vector<common_chat_msg> chat;
            chat.reserve(messages.size());

            for (const auto &msg: messages) {
                const std::string role = json_value(msg, "role", std::string(""));

                if (!msg.contains("content")) {
                    return turbo::invalid_argument_error(
                            "Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
                }

                const auto &raw_content = msg["content"];
                std::string content;
                if (raw_content.is_string()) {
                    content = raw_content.get<std::string>();
                } else if (raw_content.is_array()) {
                    // Multi-part content: concatenate every "text" part, each
                    // prefixed with a newline (parts without "text" are skipped).
                    for (const auto &part: raw_content) {
                        if (part.contains("text")) {
                            content += "\n" + part["text"].get<std::string>();
                        }
                    }
                } else {
                    return turbo::invalid_argument_error(
                            "Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
                }

                chat.push_back({role, content});
            }

            const auto formatted_chat = ctx->chat_apply_template(ctx->params.chat_template, chat, true);
            VLOG(300) << turbo::str_format("formatted_chat: '%s'\n", formatted_chat.c_str());

            return formatted_chat;
        }

        // Parse the prompt payload of a request into `request`.
        // Exactly one of "input" or "content" supplies the payload, which may
        // be a string, a number (token id), an array of strings/numbers, or an
        // array mixing strings, numbers and flat sub-arrays of strings/numbers.
        // When neither key is present the value list is left empty; callers
        // that require a prompt check for emptiness afterwards.
        static turbo::Status parse_aoi_input_request(const nlohmann::ordered_json &obj, KaiPrompts *request) {
            const auto icnt = obj.count("input");
            const auto ccnt = obj.count("content");

            if (icnt != 0 && ccnt != 0) {
                // FIX: clarified the previously garbled error message.
                return turbo::invalid_argument_error(
                        "both \"input\" and \"content\" are provided, only one of them is allowed");
            }
            if (icnt == 0 && ccnt == 0) {
                // FIX: previously fell through to obj.at("content") and threw a
                // raw json exception; leave the prompt list empty instead and
                // let the caller report the missing field.
                return turbo::OkStatus();
            }
            const nlohmann::ordered_json &ipt = (icnt != 0) ? obj.at("input") : obj.at("content");

            if (ipt.is_string()) {
                PromptsValue value;
                value.set_string_value(ipt.get<std::string>());
                *request->mutable_values()->Add() = std::move(value);
                return turbo::OkStatus();
            }
            if (ipt.is_number()) {
                PromptsValue value;
                value.set_number_value(ipt.get<int32_t>());
                *request->mutable_values()->Add() = std::move(value);
                return turbo::OkStatus();
            }
            if (ipt.is_array()) {
                for (const auto &it: ipt) {
                    if (it.is_string()) {
                        PromptsValue value;
                        value.set_string_value(it.get<std::string>());
                        *request->mutable_values()->Add() = std::move(value);
                        continue;
                    }
                    if (it.is_number()) {
                        PromptsValue value;
                        value.set_number_value(it.get<int32_t>());
                        *request->mutable_values()->Add() = std::move(value);
                        continue;
                    }
                    if (!it.is_array()) {
                        // FIX: previously any non-scalar element was iterated as
                        // a list, silently accepting objects/booleans.
                        return turbo::invalid_argument_error(
                                "prompt elements must be strings, numbers, or arrays of strings/numbers");
                    }
                    // One nesting level: a flat list of strings/numbers.
                    PromptsValueList *list_value = request->mutable_values()->Add()->mutable_list_value();
                    for (const auto &sit: it) {
                        if (sit.is_string()) {
                            PromptsValue value;
                            value.set_string_value(sit.get<std::string>());
                            *list_value->mutable_values()->Add() = std::move(value);
                            continue;
                        }
                        if (sit.is_number()) {
                            PromptsValue value;
                            value.set_number_value(sit.get<int32_t>());
                            *list_value->mutable_values()->Add() = std::move(value);
                            continue;
                        }
                        // FIX: clarified the previously garbled "too more sub
                        // lists" message.
                        return turbo::invalid_argument_error(
                                "prompt lists may only be nested one level deep");
                    }
                }  // for
            }
            return turbo::OkStatus();
        }

        // Copy the per-slot execution parameters (streaming, prompt caching,
        // prediction limits, stop words) from the JSON body into req, using
        // SlotParams' defaults for any absent key.
        turbo::Status
        parse_oai_slot_params(const nlohmann::ordered_json &data, const KMContext *context, KaiRequest &req) {
            static SlotParams default_params;
            auto *slot = req.mutable_slot_params();

            slot->set_stream(json_value(data, "stream", false));
            slot->set_cache_prompt(json_value(data, "cache_prompt", false));
            // "n_predict" wins over the OAI-style "max_tokens"; both fall back
            // to the server-wide default.
            slot->set_n_predict(
                    json_value(data, "n_predict", json_value(data, "max_tokens", context->params.n_predict)));
            slot->set_n_indent(json_value(data, "n_indent", default_params.n_indent()));
            slot->set_n_keep(json_value(data, "n_keep", default_params.n_keep()));
            slot->set_n_discard(json_value(data, "n_discard", default_params.n_discard()));
            // TODO: implement
            slot->set_t_max_prompt_ms(
                    json_value(data, "t_max_prompt_ms", default_params.t_max_prompt_ms()));
            slot->set_t_max_predict_ms(
                    json_value(data, "t_max_predict_ms", default_params.t_max_predict_ms()));

            // "stop" may be a single string or an array of strings; empty
            // entries in an array are dropped.
            const auto stop = data.find("stop");
            if (stop != data.end()) {
                if (stop->is_array()) {
                    for (const auto &word: *stop) {
                        if (!word.empty()) {
                            *slot->mutable_antiprompt()->Add() = word;
                        }
                    }
                } else if (stop->is_string()) {
                    *slot->mutable_antiprompt()->Add() = stop->get<std::string>();
                }
            }
            return turbo::OkStatus();
        }

        // Populate req's sampler parameters from the JSON body. Every knob
        // falls back to the server-wide sampler defaults (context->params.sparams)
        // when its key is absent. Also validates the mutually exclusive
        // "json_schema"/"grammar" pair and parses the "samplers", "ignore_eos"
        // and "logit_bias" options.
        turbo::Status
        parse_oai_sampler_params(const nlohmann::ordered_json &data, const KMContext *context, KaiRequest &req) {
            auto &default_sparams = context->params.sparams;
            auto sparam = req.mutable_sparam();
            // Scalar sampling knobs. Note the OAI-style aliases:
            // "typical_p" -> typ_p, "temperature" -> temp,
            // "repeat_last_n"/"repeat_penalty" -> penalty_last_n/penalty_repeat,
            // "frequency_penalty"/"presence_penalty" -> penalty_freq/penalty_present.
            sparam->set_top_k(json_value(data, "top_k", default_sparams.top_k));
            sparam->set_top_p(json_value(data, "top_p", default_sparams.top_p));
            sparam->set_min_p(json_value(data, "min_p", default_sparams.min_p));
            sparam->set_xtc_probability(
                    json_value(data, "xtc_probability", default_sparams.xtc_probability));
            sparam->set_xtc_threshold(json_value(data, "xtc_threshold", default_sparams.xtc_threshold));
            sparam->set_typ_p(json_value(data, "typical_p", default_sparams.typ_p));
            sparam->set_temp(json_value(data, "temperature", default_sparams.temp));
            sparam->set_dynatemp_range(
                    json_value(data, "dynatemp_range", default_sparams.dynatemp_range));
            sparam->set_dynatemp_exponent(
                    json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent));
            sparam->set_penalty_last_n(
                    json_value(data, "repeat_last_n", default_sparams.penalty_last_n));
            sparam->set_penalty_repeat(
                    json_value(data, "repeat_penalty", default_sparams.penalty_repeat));
            sparam->set_penalty_freq(
                    json_value(data, "frequency_penalty", default_sparams.penalty_freq));
            sparam->set_penalty_present(
                    json_value(data, "presence_penalty", default_sparams.penalty_present));
            sparam->set_dry_multiplier(
                    json_value(data, "dry_multiplier", default_sparams.dry_multiplier));
            sparam->set_dry_base(json_value(data, "dry_base", default_sparams.dry_base));
            sparam->set_dry_allowed_length(
                    json_value(data, "dry_allowed_length", default_sparams.dry_allowed_length));
            sparam->set_dry_penalty_last_n(
                    json_value(data, "dry_penalty_last_n", default_sparams.dry_penalty_last_n));
            sparam->set_mirostat(json_value(data, "mirostat", default_sparams.mirostat));
            sparam->set_mirostat_tau(json_value(data, "mirostat_tau", default_sparams.mirostat_tau));
            sparam->set_mirostat_eta(json_value(data, "mirostat_eta", default_sparams.mirostat_eta));
            sparam->set_penalize_nl(json_value(data, "penalize_nl", default_sparams.penalize_nl));
            sparam->set_seed(json_value(data, "seed", default_sparams.seed));
            sparam->set_n_probs(json_value(data, "n_probs", default_sparams.n_probs));
            sparam->set_min_keep(json_value(data, "min_keep", default_sparams.min_keep));
            // DRY sequence breakers are only overwritten when explicitly given;
            // otherwise whatever is already in sparam is kept.
            if (data.contains("dry_sequence_breakers")) {
                auto vl = json_value(data, "dry_sequence_breakers",
                                     std::vector<std::string>());
                sparam->mutable_dry_sequence_breakers()->Assign(vl.begin(), vl.end());
            }
            // "json_schema" and "grammar" are two mutually exclusive ways to
            // constrain the output; reject requests that set both (non-null).
            if (data.contains("json_schema") && !data.at("json_schema").is_null() &&
                data.contains("grammar") && !data.at("grammar").is_null()) {
                return turbo::invalid_argument_error(
                        "Either \"json_schema\" or \"grammar\" can be specified, but not both");
            }
            // A JSON schema is compiled into a grammar; otherwise take the raw
            // grammar string (or the server default).
            if (data.contains("json_schema") && !data.at("json_schema").is_null()) {
                auto obj = json_value(data, "json_schema", nlohmann::ordered_json::object());
                sparam->set_grammar(json_schema_to_grammar(obj));
            } else {
                sparam->set_grammar(json_value(data, "grammar", default_sparams.grammar));
            };

            // Optional explicit sampler chain: an array of sampler names.
            // Non-string entries are silently skipped; absent/invalid values
            // fall back to the default chain.
            const auto &samplers = data.find("samplers");
            if (samplers != data.end() && samplers->is_array()) {
                std::vector<std::string> sampler_names;
                for (const auto &name: *samplers) {
                    if (name.is_string()) {
                        sampler_names.push_back(name.get<std::string>());
                    }
                }
                auto r = common_sampler_types_from_names(sampler_names, false);
                req.mutable_sparam()->mutable_samplers()->Assign(r.begin(), r.end());
            } else {
                req.mutable_sparam()->mutable_samplers()->Assign(default_sparams.samplers.begin(),
                                                                 default_sparams.samplers.end());
            }

            // NOTE(review): ignore_eos is forced to false before consulting the
            // request, so it never inherits a server-side default.
            req.mutable_sparam()->set_ignore_eos(false);
            if (json_value(data, "ignore_eos", false)) {
                req.mutable_sparam()->set_ignore_eos(true);
            }

            // "logit_bias" entries are pairs of [token-id-or-string, bias];
            // a boolean `false` bias bans the token (bias = -infinity),
            // `true` and other shapes are skipped.
            const auto &logit_bias = data.find("logit_bias");
            if (logit_bias != data.end() && logit_bias->is_array()) {
                for (const auto &el: *logit_bias) {
                    // TODO: we may want to throw errors here, in case "el" is incorrect
                    if (el.is_array() && el.size() == 2) {
                        float bias;
                        if (el[1].is_number()) {
                            bias = el[1].get<float>();
                        } else if (el[1].is_boolean() && !el[1].get<bool>()) {
                            bias = -INFINITY;
                        } else {
                            continue;
                        }

                        if (el[0].is_number_integer()) {
                            llama_token tok = el[0].get<llama_token>();
                            LogitBias lb;
                            lb.set_bias(bias);
                            lb.set_token(tok);
                            *req.mutable_sparam()->mutable_logit_bias()->Add() = lb;
                        } else if (el[0].is_string()) {
                            LogitBias lb;
                            lb.set_bias(bias);
                            lb.set_str_token(el[0].get<std::string>());
                            *req.mutable_sparam()->mutable_logit_bias()->Add() = lb;
                        }
                    }
                }
            }


            return turbo::OkStatus();

        }
    }  // namespace internal

    // Fill in the fields common to every OAI-compatible endpoint: slot
    // parameters, sampler parameters, and a handful of top-level flags
    // (slot id, OAI-compat marker, fail-fast flag, model alias).
    // Any exception escaping the parsers is converted to an unknown_error.
    turbo::Status
    parse_oai_base_request(const nlohmann::ordered_json &data, const KMContext *context, KaiRequest &req) {
        try {
            auto status = internal::parse_oai_slot_params(data, context, req);
            if (!status.ok()) {
                return status;
            }
            status = internal::parse_oai_sampler_params(data, context, req);
            if (!status.ok()) {
                return status;
            }

            req.set_id_slot(json_value(data, "id_slot", -1));
            req.set_oaicompat(json_value(data, "__oaicompat", false));
            req.set_fail_on_no_slot(json_value(data, "fail_on_no_slot", false));
            req.set_model(json_value(data, "model", context->params.model_alias));
            return turbo::OkStatus();
        } catch (const std::exception &e) {
            return turbo::unknown_error(e.what());
        }
    }

    // Parse a tokenize request: base params plus the "input"/"content" prompt
    // payload and the "add_special"/"with_pieces" flags. Fails when no prompt
    // was supplied.
    turbo::Status
    parse_oai_tokenize_request(const nlohmann::ordered_json &json, const KMContext *context, KaiRequest &req) {
        auto rs = parse_oai_base_request(json, context, req);
        if (!rs.ok()) {
            return rs;
        }
        auto *tokenize = req.mutable_tokenize_request();
        rs = internal::parse_aoi_input_request(json, tokenize->mutable_prompts());
        if (!rs.ok()) {
            return rs;
        }

        tokenize->set_add_special(json_value(json, "add_special", false));
        tokenize->set_with_pieces(json_value(json, "with_pieces", false));
        if (tokenize->prompts().values().empty()) {
            // FIX: added the missing space before "must" in the error message.
            return turbo::invalid_argument_error("\"input\" or \"content\" must be provided");
        }
        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_tokenize_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_tokenize_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a detokenize request: base params plus an optional "tokens" array
    // of token ids to convert back to text.
    turbo::Status
    parse_oai_detokenize_request(const nlohmann::ordered_json &json, const KMContext *context, KaiRequest &req) {
        auto status = parse_oai_base_request(json, context, req);
        if (!status.ok()) {
            return status;
        }
        const auto it = json.find("tokens");
        if (it != json.end()) {
            const auto tokens = it->get<std::vector<llama_token>>();
            req.mutable_detokenize()->mutable_tokens()->Assign(tokens.begin(), tokens.end());
        }
        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_detokenize_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_detokenize_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse an embedding request: base params plus the "input"/"content"
    // prompt payload. Fails when no prompt was supplied.
    turbo::Status
    parse_oai_embedding_request(const nlohmann::ordered_json &body, const KMContext *context, KaiRequest &req) {
        auto rs = parse_oai_base_request(body, context, req);
        if (!rs.ok()) {
            return rs;
        }
        rs = internal::parse_aoi_input_request(body, req.mutable_embedding_request()->mutable_prompts());
        if (!rs.ok()) {
            return rs;
        }

        // FIX: the emptiness check previously inspected tokenize_request(),
        // which this function never populates; check the embedding request
        // that was actually filled above. Also added the missing space in the
        // error message.
        if (req.embedding_request().prompts().values().empty()) {
            return turbo::invalid_argument_error("\"input\" or \"content\" must be provided");
        }
        return turbo::OkStatus();
    }

    // String-body convenience overload: log the raw body, parse the JSON, then
    // delegate to the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_embedding_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            LOG(INFO) << "content: " << content;
            return parse_oai_embedding_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a (non-chat) completion request: base params plus a mandatory
    // string "prompt".
    turbo::Status
    parse_oai_completions_request(const nlohmann::ordered_json &json, const KMContext *context, KaiRequest &req) {
        auto rs = parse_oai_base_request(json, context, req);
        if (!rs.ok()) {
            return rs;
        }
        if (json.count("prompt")) {
            try {
                auto s = json.at("prompt").get<std::string>();
                req.mutable_completion_request()->set_prompts(s);
            } catch (const std::exception &e) {
                // FIX: corrected the ungrammatical error message.
                return turbo::invalid_argument_error("\"prompt\" must be a string");
            }
        }
        if (req.completion_request().prompts().empty()) {
            return turbo::invalid_argument_error("\"prompt\" must be provided");
        }
        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_completions_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_completions_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse an OAI /chat/completions body: base/sampler params, the chat
    // template rendering of "messages", the optional "response_format"
    // structured-output grammar, and the logprobs options. Params supported
    // by OAI but not by this server are rejected explicitly.
    turbo::Status
    parse_oai_chat_completions_request(const nlohmann::ordered_json &body, const KMContext *context, KaiRequest &req) {
        auto rs = parse_oai_base_request(body, context, req);
        if (!rs.ok()) {
            return rs;
        }
        if (!body.contains("messages")) {
            return turbo::invalid_argument_error("missing \"messages\" field");
        }
        auto rst = internal::format_chat(context, body.at("messages"));
        if (!rst.ok()) {
            return rst.status();
        }
        req.mutable_chat_completion_request()->set_message(rst.value());

        if (body.contains("response_format")) {
            nlohmann::ordered_json response_format = json_value(body, "response_format",
                                                                nlohmann::ordered_json::object());
            std::string response_type = json_value(response_format, "type", std::string());
            if (response_type == "json_object") {
                req.mutable_sparam()->set_grammar(json_schema_to_grammar(
                        json_value(response_format, "schema", nlohmann::ordered_json::object())));
            } else if (response_type == "json_schema") {
                nlohmann::ordered_json json_schema = json_value(response_format, "json_schema",
                                                                nlohmann::ordered_json::object());
                req.mutable_sparam()->set_grammar(
                        json_schema_to_grammar(json_value(json_schema, "schema", nlohmann::ordered_json::object())));
            } else if (!response_type.empty() && response_type != "text") {
                // FIX: the message previously omitted "json_schema" from the
                // accepted set even though the branch above handles it.
                return turbo::invalid_argument_error(
                        "response_format type must be one of \"text\", \"json_object\" or \"json_schema\", but got: " +
                        response_type);
            }
        }
        int n_choices = json_value(body, "n", 1);
        if (n_choices != 1) {
            // FIX: return a status instead of throwing — this overload has no
            // enclosing try/catch, so a throw would escape to the caller.
            return turbo::invalid_argument_error("Only one completion choice is allowed");
        }

        // TODO: The response format of this option is not yet OAI-compatible, but seems like no one really using it; We may need to fix it in the future
        if (json_value(body, "logprobs", false)) {
            req.set_n_probs(json_value(body, "top_logprobs", 20));
        } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
            return turbo::data_loss_error("top_logprobs requires logprobs to be set to true");
        }

        // Params supported by OAI but unsupported by llama.cpp
        static const std::vector<std::string> unsupported_params{"tools", "tool_choice"};
        for (const auto &param: unsupported_params) {
            if (body.contains(param)) {
                return turbo::unavailable_error("Unsupported param: " + param);
            }
        }

        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status
    parse_oai_chat_completions_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_chat_completions_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a JSON value (scalar or flat array) whose elements are strings or
    // numbers into `value`'s list representation. Any other element type is an
    // error. Note: iterating a primitive nlohmann::json value visits it once,
    // so a bare string/number body also works here.
    turbo::Status parse_mix_value_list(const nlohmann::ordered_json &body, PromptsValue &value) {
        for (const auto &it: body) {
            if (it.is_string()) {
                PromptsValue v;
                v.set_string_value(it.get<std::string>());
                // FIX: append to the caller's `value` — the original appended
                // `v` into its own list (self-move) and discarded the result.
                *value.mutable_list_value()->mutable_values()->Add() = std::move(v);
                continue;  // FIX: without this, the error below fired on every element
            }
            if (it.is_number()) {
                PromptsValue v;
                v.set_number_value(it.get<int32_t>());
                *value.mutable_list_value()->mutable_values()->Add() = std::move(v);
                continue;
            }
            return turbo::invalid_argument_error("bad data type");
        }
        return turbo::OkStatus();
    }

    // Parse an infill (fill-in-the-middle) request. Requires "input_prefix"
    // and "input_suffix" (strings/numbers or flat lists thereof), accepts an
    // optional "input_extra" array of {"filename", "text"} context chunks and
    // a mandatory string "prompt".
    turbo::Status
    parse_oai_infill_request(const nlohmann::ordered_json &data, const KMContext *context, KaiRequest &req) {
        auto rs = parse_oai_base_request(data, context, req);
        if (!rs.ok()) {
            return rs;
        }
        if (!data.contains("input_prefix")) {
            return turbo::invalid_argument_error("\"input_prefix\" is required");
        }

        if (!data.contains("input_suffix")) {
            return turbo::invalid_argument_error("\"input_suffix\" is required");
        }

        if (data.contains("input_extra") && !data.at("input_extra").is_array()) {
            return turbo::invalid_argument_error(
                    "\"input_extra\" must be an array of {\"filename\": string, \"text\": string}");
        }

        PromptsValue input_prefix;
        rs = parse_mix_value_list(data.at("input_prefix"), input_prefix);
        if (!rs.ok()) {
            return turbo::invalid_argument_error("parse input prefix error");
        }
        *req.mutable_infill_request()->mutable_input_prefix() = input_prefix;

        PromptsValue input_suffix;
        rs = parse_mix_value_list(data.at("input_suffix"), input_suffix);
        if (!rs.ok()) {
            return turbo::invalid_argument_error("parse input suffix error");
        }
        *req.mutable_infill_request()->mutable_input_suffix() = input_suffix;

        if (data.contains("input_extra")) {
            try {
                // FIX: iterate the "input_extra" array itself; the original
                // iterated the whole request body (and mutated a deep copy of
                // `data` just to default the key).
                for (const auto &chunk: data.at("input_extra")) {
                    ExtraPair ep;
                    ep.set_filename(json_value(chunk, "filename", std::string("tmp")));
                    ep.set_text(json_value(chunk, "text", std::string()));
                    *req.mutable_infill_request()->mutable_input_extra()->Add() = std::move(ep);
                }
            } catch (const std::exception &e) {
                return turbo::data_loss_error(e.what());
            }
        }

        if (data.count("prompt")) {
            try {
                auto s = data.at("prompt").get<std::string>();
                req.mutable_infill_request()->set_prompts(s);
            } catch (const std::exception &e) {
                // FIX: corrected the ungrammatical error message.
                return turbo::invalid_argument_error("\"prompt\" must be a string");
            }
        }
        // FIX: check the infill request's prompts — the original checked
        // completion_request(), which is never populated here, so every
        // infill request was rejected.
        if (req.infill_request().prompts().empty()) {
            return turbo::invalid_argument_error("\"prompt\" must be provided");
        }

        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_infill_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_infill_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a LoRA adapter request: base params plus an optional "loras"
    // array of {"id", "scale", "path"?} entries.
    turbo::Status
    parse_oai_lora_request(const nlohmann::ordered_json &body, const KMContext *context, KaiRequest &req) {
        auto status = parse_oai_base_request(body, context, req);
        if (!status.ok()) {
            return status;
        }
        if (body.count("loras") != 0) {
            const std::vector<nlohmann::ordered_json> adapters = body.at("loras");
            for (const auto &entry: adapters) {
                LoraInfo *info = req.mutable_lora_request()->mutable_lora_infos()->Add();
                // "id" and "scale" are mandatory per entry; "path" is optional.
                info->set_id(entry.at("id").get<int>());
                info->set_scale(entry.at("scale").get<float>());
                info->set_path(json_value(entry, "path", std::string()));
            }
        }
        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_lora_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_lora_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a rerank request: a mandatory string "query" and a non-empty
    // string array "documents". The query and the documents are also merged
    // into a single prompt list value for downstream processing.
    turbo::Status
    parse_oai_rerank_request(const nlohmann::ordered_json &body, const KMContext *context, KaiRequest &req) {
        auto status = parse_oai_base_request(body, context, req);
        if (!status.ok()) {
            return status;
        }

        if (body.count("query") == 0) {
            return turbo::invalid_argument_error("\"query\" must be provided");
        }
        const nlohmann::ordered_json &query = body.at("query");
        if (!query.is_string()) {
            return turbo::invalid_argument_error("\"query\" must be a string");
        }

        const auto documents = json_value(body, "documents", std::vector<std::string>());
        if (documents.empty()) {
            return turbo::invalid_argument_error("\"documents\" must be a non-empty string array");
        }

        auto *rerank = req.mutable_rerank_request();
        rerank->set_query(query.get<std::string>());
        for (const auto &doc: documents) {
            *rerank->mutable_docs()->Add() = doc;
        }

        // merge to PromptsValue: one list value holding [query, doc1, doc2, ...]
        auto *prompt_list = rerank->mutable_prompts()->mutable_values()->Add()->mutable_list_value();
        PromptsValue query_value;
        query_value.set_string_value(rerank->query());
        *prompt_list->mutable_values()->Add() = std::move(query_value);
        for (const auto &doc: documents) {
            PromptsValue doc_value;
            doc_value.set_string_value(doc);
            *prompt_list->mutable_values()->Add() = std::move(doc_value);
        }

        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_rerank_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_rerank_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

    // Parse a slots save/restore request. Erase requests carry no payload;
    // everything else needs a validated "filename", which is resolved against
    // the configured slot_save_path.
    turbo::Status
    parse_oai_slots_request(const nlohmann::ordered_json &json, const KMContext *context, KaiRequest &req) {
        if (req.query_type() == QUERY_SLOTS_ERASE) {
            return turbo::OkStatus();
        }
        const auto it = json.find("filename");
        if (it == json.end()) {
            return turbo::data_loss_error("filename must be set");
        }
        const std::string filename = *it;
        // Reject path traversal and other unsafe names.
        if (!fs_validate_filename(filename)) {
            return turbo::invalid_argument_error("Invalid filename");
        }
        auto *task = req.mutable_slots_task();
        task->set_filename(filename);
        task->set_filepath(context->params.slot_save_path + filename);
        return turbo::OkStatus();
    }

    // String-body convenience overload: parse the raw JSON, then delegate to
    // the structured overload. Malformed JSON becomes a data_loss error.
    turbo::Status parse_oai_slots_request(const std::string &content, const KMContext *context, KaiRequest &req) {
        try {
            return parse_oai_slots_request(nlohmann::ordered_json::parse(content), context, req);
        } catch (const std::exception &e) {
            return turbo::data_loss_error("bad json: %s", e.what());
        }
    }

}  // namespace kllm
