#pragma once

#include "llama.h"
#include "llama-batch.h"
#include "llama-cparams.h"
#include "llama-model.h"
#include "llama-kv-cache.h"
#include "llama-adapter.h"

#include "ggml-cpp.h"

#include <map>
#include <unordered_map>
#include <vector>
#include <set>

struct llama_context {
    llama_context(const llama_model & model)
        : model(model)
        , t_start_us(model.t_start_us)
        , t_load_us(model.t_load_us) {}

    const struct llama_model & model;

    struct llama_cparams      cparams;
    struct llama_sbatch       sbatch;
    struct llama_kv_cache     kv_self;
    struct llama_adapter_cvec cvec;

    // active LoRA adapters and their scales
    std::unordered_map<struct llama_adapter_lora *, float> lora;

    std::vector<ggml_backend_ptr> backends;
    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    ggml_backend_t backend_cpu = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    bool has_evaluated_once = false;

    // performance timing and token counters
    mutable int64_t t_start_us;
    mutable int64_t t_load_us;
    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls
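
    // host buffer for the model output (logits and embeddings)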
    ggml_backend_buffer_ptr buf_output;
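
    // decode output: logits for the output tokens ([n_outputs][n_vocab])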
    size_t  logits_size = 0; // capacity (in floats) of the logits buffer
    float * logits      = nullptr;

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers
    size_t  output_size = 0;         // capacity (in token positions) of the output buffers
    int32_t n_outputs   = 0;         // number of actually-used outputs in the current ubatch or last logical batch

    bool logits_all = false;
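
    // embeddings output ([n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE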
    size_t  embd_size = 0; // capacity (in floats) of the embeddings buffer
    float * embd      = nullptr;
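
    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE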
    std::map<llama_seq_id, std::vector<float>> embd_seq;
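
    // whether we are computing encoder output or decoder output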
    bool is_encoding = false;
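
    // number of position ids per token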
    int n_pos_per_token = 1;
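
    // output of the encoder part of encoder-decoder models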
    std::vector<float> embd_enc;
    std::vector<std::set<llama_seq_id>> seq_ids_enc;
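
    // memory buffers used to evaluate the model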
    std::vector<uint8_t>   buf_compute_meta;
    ggml_backend_sched_ptr sched;

    ggml_abort_callback abort_callback      = nullptr;
    void *              abort_callback_data = nullptr;
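
    // input tensors of the compute graph (filled by the llama_set_* helpers below)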
    struct ggml_tensor * inp_tokens;
    struct ggml_tensor * inp_embd;
    struct ggml_tensor * inp_pos;
    struct ggml_tensor * inp_out_ids;
    struct ggml_tensor * inp_KQ_mask;
    struct ggml_tensor * inp_KQ_mask_swa;
    struct ggml_tensor * inp_K_shift;
    struct ggml_tensor * inp_mean;
    struct ggml_tensor * inp_cls;
    struct ggml_tensor * inp_s_copy;
    struct ggml_tensor * inp_s_mask;
    struct ggml_tensor * inp_s_seq;
    struct ggml_tensor * inp_pos_bucket;
    struct ggml_tensor * inp_embd_enc;
    struct ggml_tensor * inp_KQ_mask_cross;
};
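
// helpers that fill the context's input tensors before graph computation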
void llama_set_k_shift(struct llama_context & lctx);

void llama_set_s_copy(struct llama_context & lctx);

void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch);
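
// make sure the output buffers can hold at least n_outputs rows;
// returns the maximum number of outputs for which space was reserved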
size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs);
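
// restore the outputs to the order they had in the user-provided batch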
void llama_output_reorder(struct llama_context & ctx);
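
// for internal test use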
const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);