// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <llama.h>
#include <kllm/core/types.h>
#include <vector>
#include <unordered_set>
#include <kllm/utility/all.h>
#include <kllm/proto/interface.struct.pb.h>

namespace kllm {

    // One unit of work queued to the server. `id` is assigned by the queue;
    // `id_target` is only meaningful for cancel-type tasks.
    struct server_task {
        int id        = -1; // to be filled by server_queue
        int id_target = -1; // used by SERVER_TASK_TYPE_CANCEL
        bool reset_bucket = false;
        //int id_slot = -1;
        int index = 0;      // index within a multi-task completion request
        std::vector<llama_token> prompt_tokens;
        ServerTaskType type;
        KaiRequest req;

        ServerTaskInfType inf_type = SERVER_TASK_INF_TYPE_COMPLETION;

        // Utility: collect the ids of all given tasks into a set, e.g. to
        // track or cancel a batch of related tasks as a group.
        static std::unordered_set<int> get_list_id(const std::vector<server_task> & tasks) {
            // NOTE: the constructor argument is a bucket-count hint (not a
            // size/reserve); it pre-sizes the table to avoid rehashing.
            std::unordered_set<int> ids(tasks.size());
            for (const auto & task : tasks) {
                ids.insert(task.id);
            }
            return ids;
        }
    };
    /*
    struct slot_params {
        bool stream       = true;
        bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt

        int32_t n_keep    =  0; // number of tokens to keep from initial prompt
        int32_t n_discard =  0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
        int32_t n_predict = -1; // new tokens to predict
        int32_t n_indent  =  0; // minimum line indentation for the generated text in number of whitespace characters

        int64_t t_max_prompt_ms  = -1; // TODO: implement
        int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit

        std::vector<std::string> antiprompt;
    };
    */
    // Per-request state for one inference slot. The server binds a task to an
    // idle slot, processes its prompt, and streams generated tokens from it.
    // Method bodies live in the corresponding .cpp; only is_processing() is
    // defined inline below.
    struct server_slot {
        int id;           // slot index
        int id_task = -1; // id of the task currently bound to this slot (-1 = none)

        // the index relative to completion multi-task request
        size_t index = 0;

        SlotParams params; // per-request generation parameters

        SlotState state = SLOT_STATE_IDLE;

        // used to determine the slot that has been used the longest
        int64_t t_last_used = -1;

        // generation props
        int32_t n_ctx       = 0;  // context size per slot
        int32_t n_past      = 0;
        int32_t n_decoded   = 0;  // number of tokens generated so far
        int32_t n_remaining = -1;
        int32_t i_batch     = -1;
        int32_t n_predict   = -1; // TODO: disambiguate from params.n_predict

        // n_prompt_tokens may not be equal to prompt_tokens.size(), because the prompt may be truncated
        int32_t n_prompt_tokens           = 0;
        int32_t n_prompt_tokens_processed = 0;

        // input prompt tokens
        std::vector<llama_token> prompt_tokens;

        size_t last_nl_pos = 0; // position tracked in generated_text (used by stop-string scanning — confirm in .cpp)

        std::string generated_text; // accumulated output text for this request
        std::vector<llama_token> cache_tokens;
        std::vector<completion_token_output> generated_token_probs;

        ServerTaskInfType inf_type = SERVER_TASK_INF_TYPE_COMPLETION;

        // generation-progress / stop-condition flags
        bool has_next_token = true;
        bool has_new_line   = false;
        bool truncated      = false;
        bool stopped_eos    = false; // stopped on an end-of-sequence token
        bool stopped_word   = false; // stopped on a stop word (see stopping_word)
        bool stopped_limit  = false; // stopped on a generation limit

        bool oaicompat = false; // emit OpenAI-compatible responses

        std::string oaicompat_model; // model name reported in OAI-compatible responses
        std::string stopping_word;   // the word that triggered stopped_word

        // sampling
        nlohmann::ordered_json json_schema;

        InternalSamplerParams sparams;
        struct common_sampler * smpl = nullptr; // raw pointer — ownership/lifetime managed elsewhere, TODO confirm

        llama_token sampled; // most recently sampled token

        // stats
        size_t n_sent_text        = 0; // number of generated-text characters already sent
        size_t n_sent_token_probs = 0;

        int64_t t_start_process_prompt; // timestamp: prompt processing started
        int64_t t_start_generation;     // timestamp: token generation started

        double t_prompt_processing; // ms
        double t_token_generation;  // ms

        // invoked with the slot id when the slot is released
        std::function<void(int)> callback_on_release;

        // Restores the slot to its idle/default state (definition in .cpp).
        void reset();

        // Whether generation is still within the budget from global_params (definition in .cpp).
        bool has_budget(KMParams &global_params);

        // True while the slot is bound to a task, i.e. state != SLOT_STATE_IDLE (defined inline below).
        bool is_processing() const;

        // Appends a generated token's output to the slot's buffers (definition in .cpp).
        void add_token(const completion_token_output & token);

        // Releases the slot back to the pool (definition in .cpp).
        void release();

        // Scans `text` for stop strings of the given type (definition in .cpp).
        size_t find_stopping_strings(const std::string & text, const size_t last_token_size, const StopType type);

        void print_timings() const;
    private:
        // Partial-match helper for streaming stop-string detection (definition in .cpp).
        size_t find_partial_stop_string(const std::string &stop, const std::string &text);
    };

    // A slot is considered busy whenever it has left the idle state.
    inline bool server_slot::is_processing() const {
        const bool idle = (state == SLOT_STATE_IDLE);
        return !idle;
    }


}  // namespace kllm
