# Environment configuration (dotenv-style: values are read as strings).
# Path to the local model weights file.
MODEL_PATH = ""
# if MODEL_PATH is "", default llama.cpp/gptq models
# will be downloaded to: ./models
# Example ggml path:
#MODEL_PATH = "models/llama2_7b_chat.bin"
# Inference backend. options: llama.cpp, gptq, transformers
BACKEND_TYPE = "llama.cpp"
#BACKEND_TYPE = "llama2.cu"
# only for transformers bitsandbytes 8 bit
# NOTE(review): dotenv values are plain strings; presumably the loader
# interprets "False"/"True" itself — confirm against the consuming code.
LOAD_IN_8BIT = False
# Hard upper bound on newly generated tokens per request.
MAX_MAX_NEW_TOKENS = 2048
# Default number of new tokens to generate; presumably user-adjustable
# up to MAX_MAX_NEW_TOKENS — confirm in the app/UI code.
DEFAULT_MAX_NEW_TOKENS = 1024
# Maximum prompt/context length in tokens; inputs beyond this are
# presumably truncated or rejected by the app — confirm against caller.
MAX_INPUT_TOKEN_LENGTH = 4000
# System prompt prepended to conversations; "" leaves it to the app default.
DEFAULT_SYSTEM_PROMPT = ""