MODEL_PATH = "./models/Llama-2-7b-Chat-GPTQ"
# if MODEL_PATH is "", default llama.cpp/gptq models
# will be downloaded to: ./models
# Example gptq path:
# MODEL_PATH = "./models/Llama-2-7b-Chat-GPTQ"

# options: llama.cpp, gptq, transformers
BACKEND_TYPE = "gptq"

# only for transformers bitsandbytes 8 bit
LOAD_IN_8BIT = False

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = 4000

DEFAULT_SYSTEM_PROMPT = "You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."
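
# A minimal sketch of how these settings might be consumed, assuming the file is
# loaded as a .env with python-dotenv (the variable names match the settings above,
# but the loading code itself is illustrative, not part of this config):
#
#   import os
#   from dotenv import load_dotenv
#
#   load_dotenv()  # reads this file into environment variables
#   model_path = os.getenv("MODEL_PATH", "")
#   backend_type = os.getenv("BACKEND_TYPE", "llama.cpp")
#   load_in_8bit = os.getenv("LOAD_IN_8BIT", "False") == "True"
#   max_new_tokens = int(os.getenv("DEFAULT_MAX_NEW_TOKENS", "1024"))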