// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <kllm/config/config.h>

// ---------------------------------------------------------------------------
// Generation / batching parameters.
// Naming and defaults mirror llama.cpp-style common parameters; a value of -1
// generally means "unlimited" or "use the library default".
// ---------------------------------------------------------------------------
TURBO_FLAG(int32_t, n_predict, -1, "new tokens to predict");

TURBO_FLAG(int32_t, n_ctx, 4096, "context size");

TURBO_FLAG(int32_t, n_batch, 2048, "logical batch size for prompt processing (must be >=32 to use BLAS)");

TURBO_FLAG(int32_t, n_ubatch, 512, "physical batch size for prompt processing (must be >=32 to use BLAS)");

TURBO_FLAG(int32_t, n_keep, 0, "number of tokens to keep from initial prompt");

TURBO_FLAG(int32_t, n_draft, 5, "number of tokens to draft during speculative decoding");

TURBO_FLAG(int32_t, n_chunks, -1, "max number of chunks to process (-1 = unlimited)");

TURBO_FLAG(int32_t, n_parallel, 1, "number of parallel sequences to decode");

TURBO_FLAG(int32_t, n_sequences, 1, "number of sequences to decode");

TURBO_FLAG(float, p_split, 0.1f, "speculative decoding split probability");

// --- GPU placement ---
TURBO_FLAG(int32_t, n_gpu_layers, -1, "number of layers to store in VRAM (-1 - use default)");

TURBO_FLAG(int32_t, n_gpu_layers_draft, -1, "number of layers to store in VRAM for the draft model (-1 - use default)");

TURBO_FLAG(int32_t, main_gpu, 0, "the GPU that is used for scratch and small tensors");

// NOTE(review): kept as raw strings; per-GPU split proportions are presumably
// parsed where the flag is consumed — confirm against the consumer.
TURBO_FLAG(std::vector<std::string>, tensor_split, { }, "how split tensors should be distributed across GPUs");

// ---------------------------------------------------------------------------
// Attention / RoPE / YaRN context-extension parameters.
// 0.0f for the RoPE values means "use the value stored in the model".
// ---------------------------------------------------------------------------
TURBO_FLAG(int32_t, grp_attn_n, 1, "group-attention factor");

TURBO_FLAG(int32_t, grp_attn_w, 512, "group-attention width");

TURBO_FLAG(int32_t, n_print, -1, "print token count every n tokens (-1 = disabled)");

TURBO_FLAG(float, rope_freq_base, 0.0f, "RoPE base frequency");

TURBO_FLAG(float, rope_freq_scale, 0.0f, "RoPE frequency scaling factor");

// -1.0f: let the backend pick the extrapolation mix automatically.
TURBO_FLAG(float, yarn_ext_factor, -1.0f, "YaRN extrapolation mix factor");

TURBO_FLAG(float, yarn_attn_factor, 1.0f, "YaRN magnitude scaling factor");

TURBO_FLAG(float, yarn_beta_fast, 32.0f, "YaRN low correction dim");

TURBO_FLAG(float, yarn_beta_slow, 1.0f, "YaRN high correction dim");

TURBO_FLAG(int32_t, yarn_orig_ctx, 0, "YaRN original context length");

// -1.0f disables KV cache defragmentation.
TURBO_FLAG(float, defrag_thold, -1.0f, "KV cache defragmentation threshold");

// ---------------------------------------------------------------------------
// Model locations, prompt sources, and lookup-cache paths.
// Empty string means "not set / feature disabled" throughout.
// ---------------------------------------------------------------------------
TURBO_FLAG(std::string, model, "", "model path");

TURBO_FLAG(std::string, model_draft, "", "draft model for speculative decoding");
TURBO_FLAG(std::string, model_alias, "unknown", "model alias");
TURBO_FLAG(std::string, model_url, "", "model url to download");

// Hugging Face download credentials / coordinates.
TURBO_FLAG(std::string, hf_token, "", "HF token");

TURBO_FLAG(std::string, hf_repo, "", "HF repo");

TURBO_FLAG(std::string, hf_file, "", "HF file");

TURBO_FLAG(std::string, prompt, "", "prompt");

TURBO_FLAG(std::string, prompt_file, "", "store the external prompt file name");

TURBO_FLAG(std::string, path_prompt_cache, "", "path to file for saving/loading prompt eval state");

TURBO_FLAG(std::string, input_prefix, "", "string to prefix user inputs with");

TURBO_FLAG(std::string, input_suffix, "", "string to suffix user inputs with");
TURBO_FLAG(std::string, lookup_cache_static, "", "path of static ngram cache file for lookup decoding");
TURBO_FLAG(std::string, lookup_cache_dynamic, "", "path of dynamic ngram cache file for lookup decoding");
TURBO_FLAG(std::string, rpc_servers, "", "comma separated list of RPC servers");

TURBO_FLAG(std::vector<std::string>, in_files, {}, "all input files");
TURBO_FLAG(std::vector<std::string>, antiprompt, {},
           "strings upon which more user input is prompted (a.k.a. reverse prompts)");

// ---------------------------------------------------------------------------
// LoRA loading behaviour and evaluation-benchmark switches
// (HellaSwag / Winogrande / TruthfulQA / KL divergence).
// ---------------------------------------------------------------------------
TURBO_FLAG(bool, lora_init_without_apply, false,
           " only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply)");

TURBO_FLAG(bool, hellaswag, false, "compute HellaSwag score over random tasks from datafile supplied in prompt");
TURBO_FLAG(size_t, hellaswag_tasks, 400, "number of tasks to use when computing the HellaSwag score");

TURBO_FLAG(bool, winogrande, false, "compute Winogrande score over random tasks from datafile supplied in prompt");

TURBO_FLAG(size_t, winogrande_tasks, 0,
           "number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed");

TURBO_FLAG(bool, multiple_choice, false, "compute TruthfulQA score over random tasks from datafile supplied in prompt");

TURBO_FLAG(size_t, multiple_choice_tasks, 0,
           "number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed");

TURBO_FLAG(bool, kl_divergence, false, "compute KL divergence");

// ---------------------------------------------------------------------------
// HTTP server and worker-thread counts. -1 means auto-detect, or inherit
// from the corresponding base flag (see each description).
// ---------------------------------------------------------------------------
TURBO_FLAG(int32_t, port, 8080, "server listens on this network port");

TURBO_FLAG(int32_t, timeout_read, 600, "http read timeout in seconds");

TURBO_FLAG(int32_t, timeout_write, 600, "http write timeout in seconds");

TURBO_FLAG(int32_t, n_threads_http, -1, "number of threads to process HTTP requests");

TURBO_FLAG(int32_t, n_threads, -1, "number of threads to use during generation (default: auto)");

TURBO_FLAG(int32_t, batch_n_threads, -1, "number of threads to use during batch and prompt processing (default: same as --threads)");

// Thread counts for the speculative-decoding draft model.
TURBO_FLAG(int32_t, draft_n_threads, -1, "number of threads to use during generation (default: same as --threads)");

TURBO_FLAG(int32_t, draft_batch_n_threads, -1, "number of threads to use during batch and prompt processing (default: same as --threads-draft)");

// ---------------------------------------------------------------------------
// CPU affinity / priority / polling, in four variants:
//   <base>, <base>_batch, <base>_draft, <base>_batch_draft
// covering {generation, batch} x {main model, draft model}.
// ---------------------------------------------------------------------------
TURBO_FLAG(std::string, cpu_mask, "",  "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")");

TURBO_FLAG(std::string, cpu_mask_batch, "",  "CPU affinity mask: arbitrarily long hex. Complements cpu-range-batch (default: same as --cpu-mask)");

TURBO_FLAG(std::string, cpu_mask_draft, "",  "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)");

TURBO_FLAG(std::string, cpu_mask_batch_draft, "",  "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)");

TURBO_FLAG(std::string, cpu_range, "",  "range of CPUs for affinity. Complements --cpu-mask");

TURBO_FLAG(std::string, cpu_range_batch, "",  "ranges of CPUs for affinity. Complements --cpu-mask-batch");

TURBO_FLAG(std::string, cpu_range_draft, "",  "Ranges of CPUs for affinity. Complements --cpu-mask-draft");

TURBO_FLAG(std::string, cpu_range_batch_draft, "",  "Ranges of CPUs for affinity. Complements --cpu-mask-draft");

TURBO_FLAG(bool, strict_cpu, false, "use strict CPU placement (default: false)");

TURBO_FLAG(bool, strict_cpu_batch, false, "use strict CPU placement (default: same as --cpu-strict)");

TURBO_FLAG(bool, strict_cpu_draft, false, "use strict CPU placement (default: same as --cpu-strict)");

TURBO_FLAG(bool, strict_cpu_batch_draft, false, "use strict CPU placement (default: same as --cpu-strict)");

TURBO_FLAG(int32_t, cpu_priority, 0, "set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: 0)");

TURBO_FLAG(int32_t, cpu_priority_batch, 0, "set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: 0)");

TURBO_FLAG(int32_t, cpu_priority_draft, 0, "set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: 0)");

TURBO_FLAG(int32_t, cpu_priority_batch_draft, 0, "set process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: 0)");

TURBO_FLAG(int32_t, cpu_poll, 50, "use polling level<0,100> to wait for work (0 - no polling, default: 50)");

TURBO_FLAG(int32_t, cpu_poll_batch, 50, "use polling level<0,100> to wait for work (0 - no polling, default: 50)");

TURBO_FLAG(int32_t, cpu_poll_draft, 50, "use polling level<0,100> to wait for work (0 - no polling, default: 50)");

TURBO_FLAG(int32_t, cpu_poll_batch_draft, 50, "use polling level<0,100> to wait for work (0 - no polling, default: 50)");

// ---------------------------------------------------------------------------
// Miscellaneous runtime behaviour toggles.
// ---------------------------------------------------------------------------
TURBO_FLAG(int32_t, n_cache_reuse, 0, "min chunk size to reuse from the cache via KV shifting");

// Process escape sequences in the prompt/inputs.
TURBO_FLAG(bool, escape, true, "escape \"\\n\", \"\\r\", \"\\t\", \"\\'\", \"\\\"\", and \"\\\\\"");

TURBO_FLAG(bool, multiline_input, false, "reverse the usage of `\\`");

TURBO_FLAG(bool, simple_io, false, "improves compatibility with subprocesses and limited consoles");

TURBO_FLAG(bool, cont_batching, true, "insert new sequences for decoding on-the-fly");

TURBO_FLAG(bool, flash_attn, false, "flash attention");

TURBO_FLAG(bool, no_perf, false, "disable performance metrics");

// When enabled, the context window can be shifted (KV cache slid) so that
// generation continues past the context limit on unbounded text streams.
// Fixed typo in the help text: "inifinite" -> "infinite".
TURBO_FLAG(bool, ctx_shift, true, "context shift on infinite text generation");

// ---------------------------------------------------------------------------
// Importance-matrix (imatrix) collection and embedding output options.
// ---------------------------------------------------------------------------
TURBO_FLAG(std::string, out_file, "imatrix.dat", "save the resulting imatrix to this file");

TURBO_FLAG(int32_t, n_out_freq, 10, "output the imatrix every n_out_freq iterations");

// 0 disables periodic saving.
TURBO_FLAG(int32_t, n_save_freq, 0, "save the imatrix every n_save_freq iterations");
TURBO_FLAG(int32_t, i_chunk, 0, "start processing from this chunk");
TURBO_FLAG(bool, process_output, false, "collect data for the output tensor");
TURBO_FLAG(bool, compute_ppl, true, "whether to compute perplexity");

TURBO_FLAG(bool, embedding, false, "get only sentence embedding");

TURBO_FLAG(int32_t, embd_normalize, 2,
           "normalisation for embeddings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)");
TURBO_FLAG(std::string, embd_out, "",
           "empty = default, \"array\" = [[],[]...], \"json\" = openai style, \"json+\" = same \"json\" + cosine similarity matrix");
TURBO_FLAG(std::string, embd_sep, "\n", "separator of embeddings");

// ---------------------------------------------------------------------------
// Reranking, passkey-test, multimodal, control-vector, perplexity and PCA
// parameters.
// ---------------------------------------------------------------------------
TURBO_FLAG(bool, reranking, false, "enable reranking support on server");

// Passkey retrieval test: repeat junk text n_junk times with the passkey at i_pos.
TURBO_FLAG(int32_t, n_junk, 250, "number of times to repeat the junk text");

TURBO_FLAG(int32_t, i_pos, -1, "position of the passkey in the junk text");

TURBO_FLAG(std::string, mmproj, "", "path to multimodal projector");

TURBO_FLAG(std::vector<std::string>, image, {}, "path to image file(s)");

// -1 on both ends means "apply to all layers".
TURBO_FLAG(int32_t, control_vector_layer_start, -1, "layer range for control vector");

TURBO_FLAG(int32_t, control_vector_layer_end, -1, "layer range for control vector");

TURBO_FLAG(int32_t, ppl_stride, 0,
           "stride for perplexity calculations. If left at 0, the pre-existing approach will be used.");

TURBO_FLAG(int32_t, ppl_output_type, 0,
           "= 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line (which is more convenient to use for plotting)");

TURBO_FLAG(int32_t, n_pca_batch, 100, "pca batch size");

TURBO_FLAG(int32_t, n_pca_iterations, 1000, "pca iteration size");

// Control-vector generator file paths. The positive/negative files are
// *input* prompt files (one prompt per line); their descriptions previously
// said "cvector output file", a copy-paste of the line above.
TURBO_FLAG(std::string, cvector_outfile, "control_vector.gguf", "cvector output file");
TURBO_FLAG(std::string, cvector_positive_file, "examples/cvector-generator/positive.txt", "positive prompts file, one prompt per line");

TURBO_FLAG(std::string, cvector_negative_file, "examples/cvector-generator/negative.txt", "negative prompts file, one prompt per line");

// Use suffix/prefix/middle (SPM) token ordering for fill-in-the-middle models.
TURBO_FLAG(bool, spm_infill, false, "suffix/prefix/middle pattern for infill");

// Output path for the lora-merge/export tool. The description previously
// just repeated the default filename, which told the user nothing.
TURBO_FLAG(std::string, lora_outfile, "ggml-lora-merged-f16.gguf", "output file for the merged lora model");

// ---------------------------------------------------------------------------
// KV-cache data types and basic server paths.
// ---------------------------------------------------------------------------
TURBO_FLAG(std::string, cache_type_k, "f16", "KV cache data type for the K");

TURBO_FLAG(std::string, cache_type_v, "f16", "KV cache data type for the V");

TURBO_FLAG(std::string, hostname, "127.0.0.1", "hostname");

// Directory served for static web assets; empty uses the built-in default.
TURBO_FLAG(std::string, public_path, "", "public path");

// Empty means "use the chat template embedded in the model".
TURBO_FLAG(std::string, chat_template, "", "chat template");

// Whether chat-template formatting is applied at all (the template itself
// comes from the chat_template flag or the model).
TURBO_FLAG(bool, enable_chat_template, true, "chat template");

// Accepted API keys; empty vector disables authentication.
TURBO_FLAG(std::vector<std::string>, api_keys, {}, "api keys");

TURBO_FLAG(std::string, ssl_file_key, "", "ssl key");

TURBO_FLAG(std::string, ssl_file_cert, "", "ssl cert");

TURBO_FLAG(bool, webui, true, "enable webui");

// Enables the server's per-slot state endpoint. The description previously
// read "enable webui" — a copy-paste of the webui flag above it.
TURBO_FLAG(bool, endpoint_slots, false, "enable slots monitoring endpoint");

// Gate for the server /props endpoint; affects only mutating (POST) access.
TURBO_FLAG(bool, endpoint_props, false, "only control POST requests, not GET");

// Enables the Prometheus-compatible /metrics endpoint. The description was
// copy-pasted from endpoint_props ("only control POST requests, not GET"),
// which does not apply to the metrics endpoint.
TURBO_FLAG(bool, endpoint_metrics, false, "enable prometheus compatible metrics endpoint");

// ---------------------------------------------------------------------------
// Miscellaneous toggles (memory mapping, debugging, slot persistence) and
// retrieval/context-embedding parameters.
// ---------------------------------------------------------------------------
TURBO_FLAG(bool , is_pp_shared, false, "is pp shared");

TURBO_FLAG(bool , input_prefix_bos, false, "prefix BOS to user inputs, preceding input_prefix");

TURBO_FLAG(bool , logits_all, false, "return logits for all tokens in the batch");

TURBO_FLAG(bool , use_mmap, true, "use mmap for faster loads");

TURBO_FLAG(bool , use_mlock, false, "use mlock to keep model in memory");

TURBO_FLAG(bool , verbose_prompt, false, "print prompt tokens before generation");
TURBO_FLAG(bool , display_prompt, true, "print prompt before generation");

TURBO_FLAG(bool , dump_kv_cache, false, "dump the KV cache contents for debugging purposes");
TURBO_FLAG(bool , no_kv_offload, false, "disable KV offloading");
TURBO_FLAG(bool , warmup, true, "warmup run");
TURBO_FLAG(bool , check_tensors, false, "validate tensor data");

// Empty path disables slot state save/restore.
TURBO_FLAG(std::string, slot_save_path, "", "slot save path");

TURBO_FLAG(bool , log_json, false, "log json");

// Minimum prompt similarity required to reuse an existing server slot.
TURBO_FLAG(float , slot_prompt_similarity, 0.5f, "slot prompt similarity");

TURBO_FLAG(std::vector<std::string>, context_files, {}, " context files to embed");

TURBO_FLAG(int32_t, chunk_size, 64, "chunk size for context embedding");

TURBO_FLAG(std::string , chunk_separator,  "\n", "chunk separator for context embedding");


// ---------------------------------------------------------------------------
// Batched-bench size lists (pp = prompt processing, tg = text generation,
// pl = parallel level) and interactive-mode switches.
// NOTE(review): the size lists are stored as strings, presumably parsed to
// integers by the bench tool — confirm against the consumer.
// ---------------------------------------------------------------------------
TURBO_FLAG(std::vector<std::string> , n_pp, {}, "n pp");
TURBO_FLAG(std::vector<std::string> , n_tg, {}, "n tg");
TURBO_FLAG(std::vector<std::string> , n_pl, {}, "n pl");

TURBO_FLAG(bool , batched_bench_output_jsonl, false, "log json lines");

TURBO_FLAG(bool , use_color, false, "use color to distinguish generations and inputs");

TURBO_FLAG(bool , special, false, "enable special token output");

TURBO_FLAG(bool , interactive, false, "interactive mode");

TURBO_FLAG(bool , interactive_first, false, "wait for user input immediately");

TURBO_FLAG(bool , conversation, false, "conversation mode (does not print special tokens and suffix/prefix)");

TURBO_FLAG(bool , prompt_cache_all, false, "save user input and generations to prompt cache");

TURBO_FLAG(bool , prompt_cache_ro, false, "open the prompt cache read-only and do not update it");

//TURBO_FLAG(int32_t , verbosity, 0, "open the prompt cache read-only and do not update it");
// NUMA placement strategy; the description enumerates the accepted values.
TURBO_FLAG(int32_t , numa, 0, " NUMA_STRATEGY_DISABLED   = 0,\n"
                              " NUMA_STRATEGY_DISTRIBUTE = 1,\n"
                              " NUMA_STRATEGY_ISOLATE    = 2,\n"
                              " NUMA_STRATEGY_NUMACTL    = 3,\n"
                              " NUMA_STRATEGY_MIRROR     = 4,\n"
                              " NUMA_STRATEGY_COUNT      = 5");

// Multi-GPU split mode; the description enumerates the accepted values.
TURBO_FLAG(int32_t , split_mode, 1, "0: single GPU\n"
                                    "1: split layers and KV across GPUs\n"
                                    "2: split layers and KV across GPUs, use tensor parallelism if supported");

//TURBO_FLAG(std::set<int>, examples,{}, "kllm examples");

/*
 *

 */
