// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <kllm/tools/oai/oai.h>
#include <kllm/tools/service_context.h>
#include <turbo/bootstrap/servlet.h>
#include <turbo/flags/flag.h>
#include <kllm/openai/embedding.h>
#include <kllm/kai/kai.h>

namespace kllm {

    /// Split `s` into segments on every occurrence of `separator`.
    /// Always returns at least one element; adjacent separators and a
    /// trailing separator yield empty strings, matching the original
    /// "split with trailing part" semantics.
    ///
    /// @param s         input text to split
    /// @param separator delimiter (defaults to "\n"); if empty, the whole
    ///                  input is returned as a single segment
    /// @return the list of segments, in order
    static std::vector<std::string> split_emb_lines(const std::string & s, const std::string & separator = "\n") {
        std::vector<std::string> lines;

        // Fix: an empty separator matches at every position (find("") == 0),
        // so `start` would never advance and the loop below would spin
        // forever. Treat the whole input as one segment instead.
        if (separator.empty()) {
            lines.push_back(s);
            return lines;
        }

        size_t start = 0;
        size_t end = s.find(separator);

        while (end != std::string::npos) {
            lines.push_back(s.substr(start, end - start));
            start = end + separator.length();
            end = s.find(separator, start);
        }

        lines.push_back(s.substr(start)); // Add the last part (may be empty)

        return lines;
    }

    // File-local service context for the embedding subcommand; holds the
    // parsed params context and server state used by run_embedding().
    static ServiceContext oai_context;

    // Forward declaration: run_embedding is registered as the subcommand
    // callback in setup_embedding_cmd() and defined further below.
    static void run_embedding();
    /// Register the "embedding" subcommand on `app`.
    ///
    /// Wires the subcommand's options into the shared params context,
    /// attaches the standard log options, and installs run_embedding as
    /// the callback to invoke when the subcommand is selected.
    ///
    /// @param app the root CLI application to extend
    /// @return turbo::OkStatus() (registration itself does not fail here)
    turbo::Status setup_embedding_cmd(turbo::cli::App *app) {
        auto *cmd = app->add_subcommand("embedding", "embedding the inputs");
        oai_context.params_context =
                ParamsContext::setup_app_context(cmd, ServiceContext::params, LLAMA_EXAMPLE_EMBEDDING);
        turbo::Servlet::setup_log_option(cmd);
        cmd->callback(run_embedding);
        return turbo::OkStatus();
    }

    // Entry point for the "embedding" subcommand: loads the model, embeds
    // each separator-delimited line of the configured prompt, and writes the
    // formatted result as JSON to stdout.
    void run_embedding() {

        // Force embedding mode regardless of what was parsed.
        ServiceContext::params.embedding = true;
        // For non-causal models, batch size must be equal to ubatch size
        ServiceContext::params.n_ubatch = ServiceContext::params.n_batch;

        ServiceContext::call_after_parse();
        // Generally you only need one Server.
        // load the model
        LOG_INF("%s: loading model\n", __func__);
        oai_context.state.store(SERVER_STATE_READY);
        LOG_INF("%s: model loaded\n", __func__);



        // Build the embedding request: one PromptsValue string entry per
        // prompt segment, collected into a single list value that is added
        // as one element of the request's `prompts`.
        KaiEmbeddings embd(&ServiceContext::ctx_server);
        KaiRequest km_req;
        KaiResponse km_res;
        km_req.set_query_type(QUERY_EMBEDDING);
        // Split the configured prompt on the user-chosen separator (embd_sep).
        std::vector<std::string> prompts = split_emb_lines(ServiceContext::params.prompt, ServiceContext::params.embd_sep);
        kllm::PromptsValue string_list;
        for(auto &it : prompts) {
            kllm::PromptsValue entity;
            entity.set_string_value(it);
            *string_list.mutable_list_value()->mutable_values()->Add() = std::move(entity);
        }
        *km_req.mutable_embedding_request()->mutable_prompts()->mutable_values()->Add() = string_list;
        // Run the embedding between the async context start/stop calls.
        oai_context.start_context_async();
        embd.embedding(km_req, km_res);
        // write JSON response
        // A status code of 0 is treated as success here; presumably the
        // project-wide convention — nonzero codes fall to the error branch.
        if (km_res.status().code() == 0) {
            nlohmann::ordered_json json;
            int err_code;
            std::string errmsg;
            // NOTE(review): errmsg/err_code are filled by
            // format_aoi_embedding_response but never inspected afterwards —
            // TODO confirm whether formatting failures should be reported.
            format_aoi_embedding_response(km_res, json, errmsg, err_code);
            std::cout<<json.dump()<<std::endl;
        } else {
            // Error path: print the response status code and message.
            std::cout<<"code: "<<km_res.status().code()<<std::endl;
            std::cout<<"message: "<<km_res.status().errmsg()<<std::endl;
        }
        oai_context.stop_context_async();
    }
}  // namespace kllm
