// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <kllm/utility/all.h>
#include <turbo/utility/status.h>
#include <llama.h>

#include <cstdint>
#include <string>
#include <vector>

namespace kllm {

    // Thin wrapper around a llama.cpp model + context pair, configured from
    // KMParams. Lifecycle: default-construct, then initialize(); resources are
    // released via finalize() / the destructor.
    //
    // NOTE(review): this class owns raw llama.cpp handles (`model`, `ctx`) and
    // declares a destructor, but copy/move operations are left implicit — copying
    // an initialized instance would presumably double-free on destruction.
    // Consider deleting copy/move (Rule of Five); not done here to avoid
    // changing the callable interface.
    class KMModel {
    public:
        KMModel() = default;

        ~KMModel();

        // Loads/initializes the model and context from `p`.
        // Returns a non-OK status on failure. The result must not be ignored,
        // since using the object after a failed initialize is unsafe.
        // NOTE(review): `_params` suggests a pointer to `p` is retained, in
        // which case `p` must outlive this object — confirm in the .cpp.
        [[nodiscard]] turbo::Status initialize(KMParams &p);

        // Releases the resources acquired by initialize().
        void finalize();

        // Returns whether the loaded model provides a usable chat template.
        bool validate_model_chat_template() const;

        // Clears the context's key/value cache.
        void kv_cache_clear();

        // tokenizes a string into a vector of tokens
        // should work similar to Python's `tokenizer.encode`
        [[nodiscard]] std::vector<llama_token>
        tokenize(const std::string &text, bool add_special, bool parse_special = false) const;

        llama_model *model{nullptr};
        llama_context *ctx{nullptr};
        // FIX: was uninitialized — reading it before initialize() was UB.
        KMParams *_params{nullptr};
        std::vector<common_lora_adapter_container> loras;
        int32_t n_ctx{0};
        bool clean_kv_cache{true};
        bool add_bos_token{true};
        bool has_eos_token{false};
    };
}  // namespace kllm
