//
// Created by jeff on 24-11-24.
//


#include <kllm/tools/service_context.h>
#include <kllm/tools/oai/oai.h>
#include <kllm/tools/embedding/emb.h>
#include <kllm/tools/quantize/quantize.h>
#include <thread>

namespace kllm {

    // Process-wide singletons: parsed CLI/model parameters and the server
    // context they configure. Shared by every subcommand in this binary.
    KMParams  ServiceContext::params;
    KMContext ServiceContext::ctx_server;

    /// Post-argument-parse initialization hook.
    ///
    /// Boots the llama backend and NUMA support, initializes the server
    /// context from the parsed params, and resolves the chat template
    /// (falling back to "chatml" when the model's built-in template is
    /// unsupported). Exits the process with status 1 on model-load failure.
    void ServiceContext::call_after_parse() {
        params.verify_model_alias();

        llama_backend_init();
        llama_numa_init(params.numa);
        // hardware_concurrency() returns unsigned int — print with %u;
        // mixing it with %d is a printf format/argument mismatch (UB).
        LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %u\n", params.cpuparams.n_threads,
                params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");
        // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
        auto rs = ServiceContext::ctx_server.initialize(ServiceContext::params);
        if (!rs.ok()) {
            LOG_ERR("%s: exiting due to model loading error\n", __func__);
            ServiceContext::clean_up();
            exit(1);
        }

        if (params.chat_template.empty()) {
            if (!ctx_server.km_model.validate_model_chat_template()) {
                LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n",
                        __func__);
                params.chat_template = "chatml";
            }
        }
        // print sample chat example to make it clear which template is used
        LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(),
                ctx_server.chat_format_example(params.chat_template).c_str());
        // NOTE(review): stream-style LOG(INFO) mixes with the printf-style
        // LOG_INF used everywhere else in this function — consider unifying
        // once params.model's exact type is confirmed.
        LOG(INFO)<<"model: "<<ServiceContext::params.model;
    }

    // Releases global llama backend resources. Call once at shutdown, after
    // the context loop has been stopped (see stop_context_async).
    void ServiceContext::clean_up() {
        llama_backend_free();
    }

    void ServiceContext::start_context_async() {
        auto func = []() {
            ctx_server.queue_tasks.on_new_task(std::bind(
                    &KMContext::process_single_task, &ctx_server, std::placeholders::_1));

            ctx_server.queue_tasks.on_update_slots(std::bind(
                    &KMContext::update_slots, &ctx_server));
            ctx_server.queue_tasks.start_loop();
        };
        ctx_runner = std::make_unique<std::thread>(std::thread(func));
    }

    /// Stops the background task loop started by start_context_async().
    /// Safe to call multiple times: the original joined unconditionally,
    /// so a second call would join() an already-joined thread and throw
    /// std::system_error (terminating the process).
    void ServiceContext::stop_context_async() {
        if (ctx_runner) {
            ctx_server.queue_tasks.terminate();
            if (ctx_runner->joinable()) {
                ctx_runner->join();
            }
            ctx_runner.reset();  // make repeated stops idempotent
        }
    }

    /// Registers all subcommands (OAI service, embedding, quantize) on the
    /// CLI app and installs a post-parse check that prints help and exits
    /// when no subcommand was given.
    ///
    /// @param app  the root CLI application; must outlive parsing.
    /// @return     first failing registration status, or OkStatus().
    turbo::Status setup_app(turbo::cli::App *app) {
        // All registrars share the signature Status(App*); run them in order
        // and stop at the first failure instead of repeating the check.
        for (auto setup : {setup_oai_service, setup_embedding_cmd, setup_quantize_cmd}) {
            auto rs = setup(app);
            if (!rs.ok()) {
                return rs;
            }
        }

        //app->require_subcommand(1);
        app->parse_complete_callback([app]() {
            if (app->get_subcommands().empty()) {
                std::cerr << app->help() << std::endl;
                exit(1);
            }
        });
        return turbo::OkStatus();
    }
}  // namespace kllm