// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <functional>
#include <string>
#include <unordered_set>
#include <vector>

#include <kllm/core/km_model.h>
#include <kllm/core/task_slot.h>
#include <kllm/core/message.h>
#include <kllm/core/metric.h>
#include <kllm/core/queue.h>
#include <kllm/core/sampling.h>
#include <kllm/core/response.h>
#include <kllm/core/format.h>
#include <kllm/proto/interface.struct.pb.h>
#include <turbo/utility/status.h>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

namespace kllm {

    // A single chat message: a role tag plus its text. Consumed by the
    // chat-template helpers below (chat_apply_template, chat_format_single).
    struct common_chat_msg {
        std::string role;    // speaker role (semantics defined by the chat template in use)
        std::string content; // raw message text
    };

    // Core inference-server context. Owns the loaded model, the llama batch used
    // for decoding, the pool of per-request slots, the task/result queues, and
    // server metrics. This header carries declarations only; most definitions
    // live in the corresponding .cpp.
    struct KMContext {
        KMModel km_model;
        // Shared decode batch; zero-initialized so the destructor's
        // llama_batch_free() is safe even if initialize() was never called.
        llama_batch batch = {};

        // slots / clients
        std::vector<server_slot> slots;

        server_queue queue_tasks;
        server_response queue_results;

        server_metrics metrics;

        KMParams params;

        // Necessary similarity of prompt for slot selection
        float slot_prompt_similarity = 0.0f;

        // Frees each slot's sampling context, then the shared batch.
        ~KMContext() {
            // Clear any sampling context
            for (server_slot &slot: slots) {
                if (slot.smpl != nullptr) {
                    common_sampler_free(slot.smpl);
                }
            }

            llama_batch_free(batch);
        }

        // Initializes the context from `p`; returns a non-OK status on failure.
        // Presumably must succeed before any task is served — confirm in the .cpp.
        turbo::Status initialize(const KMParams &p);

        // Looks up a slot by its id. NOTE(review): likely returns nullptr when no
        // slot matches — confirm against the implementation.
        server_slot *get_slot_by_id(int id);

        // Selects a slot to run `task`; slot_prompt_similarity presumably guides
        // the choice when matching against cached prompts — confirm in the .cpp.
        server_slot *get_available_slot(const server_task &task);

        // Binds `task` to `slot` and starts it; the bool presumably signals
        // success — confirm at call sites.
        bool launch_slot_with_task(server_slot &slot, const server_task &task);

        // Clears the model's KV cache.
        void kv_cache_clear();

        // Handles one generated token for `slot` (append, stream, stop checks
        // are implementation-defined here).
        bool process_token(completion_token_output &result, server_slot &slot);
        //
        // Functions to create new task(s) and receive result(s)
        //

        // break the input "prompt" into multiple tasks if needed, then format and tokenize the input prompt(s)
        std::vector<server_task> create_tasks_inference(const KaiRequest &data, ServerTaskInfType inf_type, const KaiPrompts&prompts);

        // Tokenizes each input prompt; tokenization errors are reported through
        // the out-parameter `status`.
        std::vector<std::vector<llama_token>>
        tokenize_input_prompts(const KaiPrompts &prompt, bool add_special, bool parse_special,
                               turbo::Status &status);

        // Concatenates the detokenized pieces for tokens in [begin, end);
        // defined at the bottom of this header.
        template<typename Iter>
        std::string tokens_to_str(Iter begin, Iter end);

        // Cancels the tasks identified by `id_tasks`.
        void cancel_tasks(const std::unordered_set<int> &id_tasks);

        // receive the results from task(s) created by create_tasks_inference
        void receive_cmpl_results(
                const std::unordered_set<int> &id_tasks,
                const std::function<void(std::vector<TaskResult> &)> &result_handler,
                const std::function<void(const turbo::Status &)> &error_handler);

        // receive the results from task(s) created by create_tasks_inference, in stream mode
        void receive_cmpl_results_stream(
                const std::unordered_set<int> &id_tasks, const
        std::function<bool(TaskResult &)> &result_handler, const
                std::function<void(const turbo::Status &status)> &error_handler);
        //
        // Functions to process the task
        //
        void process_single_task(server_task task);

        // Main scheduling step: advances all active slots.
        void update_slots();

        // Describes the loaded model.
        ModelMeta get_meta() const;

        /// public apis
        std::vector<llama_token> tokenize(const std::string &text, bool add_special, bool parse_special = false);

        // tokenizes a token into a piece, optionally renders special/control tokens
        // should work similar to Python's `tokenizer.id_to_piece`
        std::string token_to_piece(llama_token token, bool special = true) const;

        // detokenizes a vector of tokens into a string
        // should work similar to Python's `tokenizer.decode`
        // optionally renders special/control tokens
        std::string detokenize(const std::vector<llama_token> &tokens, bool special = true) const;

        // Renders a single token in a display-safe form (e.g. for logs/output).
        std::string tokens_to_output_formatted_string(const llama_token token) const;


        // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
        static bool chat_verify_template(const std::string & tmpl);

        // Processes escape sequences in `input` in place.
        static void string_process_escapes(std::string & input);

        // Debug helpers: human-readable dumps of a batch / a token vector.
        std::string string_from(const struct llama_batch & batch) const;

        std::string string_from(const std::vector<llama_token> & tokens) const;

        // CPP wrapper for llama_chat_apply_template
        // If the built-in template is not supported, we default to chatml
        // If the custom "tmpl" is not supported, we throw an error
        std::string chat_apply_template(const std::string & tmpl, const std::vector<common_chat_msg> & msgs, bool add_ass) const;

        // Format single message, while taking into account the position of that message in chat history
        std::string chat_format_single(const std::string & tmpl,
                                                  const std::vector<common_chat_msg> & past_msg,
                                                  const common_chat_msg & new_msg,
                                                  bool add_ass) const;

        // Returns an example of formatted chat
        std::string chat_format_example(const std::string &tmpl) const;

    public:
        // getters

        const server_slot &get_default_generation_settings_for_props() const;

        // Converts a slot's state into its protobuf representation.
        KaiSlotState trans_proto(const server_slot &slot) const;

    private:
        // Result emission: push partial/final/embedding/rerank results for a slot
        // onto the response queue (exact routing defined in the .cpp).
        void send_partial_response(server_slot &slot, completion_token_output tkn);

        void send_final_response(const server_slot &slot);

        void send_embedding(const server_slot &slot, const llama_batch &batch);

        void send_rerank(const server_slot &slot, const llama_batch &batch);

        // Error emission overloads, addressed by task, slot, or raw task id.
        void
        send_error(const server_task &task, const std::string &error, const enum ErrorType type = ERROR_TYPE_SERVER);

        void
        send_error(const server_slot &slot, const std::string &error, const enum ErrorType type = ERROR_TYPE_SERVER);

        void send_error(const int id_task, const std::string &error, const enum ErrorType type = ERROR_TYPE_SERVER);

        // Tokenizes a prompt value that may mix raw text and pre-tokenized parts.
        std::vector<llama_token>
        tokenize_mixed(const PromptsValue &value, bool add_special, bool parse_special) const;

        // Builds the token sequence for an infill (fill-in-the-middle) request.
        std::vector<llama_token> format_infill(
                const KaiRequest &req,
                const int n_batch,
                const int n_predict,
                const int n_ctx,
                const bool spm_infill,
                const std::vector<llama_token> &tokens_prompt
        );

    private:
        // Template slot whose settings back get_default_generation_settings_for_props().
        server_slot default_generation_settings_for_props;
    };

    // Read-only access to the template slot holding the default generation
    // settings (presumably surfaced via a server-properties endpoint — confirm
    // at call sites).
    inline const server_slot &KMContext::get_default_generation_settings_for_props() const {
        return default_generation_settings_for_props;
    }

    // Detokenizes the half-open range [begin, end) into one string by
    // concatenating the piece rendered for each token via token_to_piece().
    template<typename Iter>
    std::string KMContext::tokens_to_str(Iter begin, Iter end) {
        std::string out;
        for (Iter it = begin; it != end; ++it) {
            out += token_to_piece(*it);
        }
        return out;
    }


}  // namespace kllm

