# BEGIN GENERAL GGUF METADATA
id: aya23
model: aya23
name: aya23
version: 1
# END GENERAL GGUF METADATA
# BEGIN INFERENCE PARAMETERS
# BEGIN REQUIRED
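# Stop sequences: generation halts when the model emits this token.
# <|END_OF_TURN_TOKEN|> is the end-of-turn marker in the Aya 23 chat format.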
stop:
  - <|END_OF_TURN_TOKEN|>
# END REQUIRED
# BEGIN OPTIONAL
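# Core sampling settings: stream tokens as they are generated, nucleus
# sampling cutoff (top_p), sampling temperature, OpenAI-style frequency and
# presence penalties (0 disables them), a per-response token cap, and the
# RNG seed (-1 picks a random seed for each request).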
stream: true
top_p: 0.9
temperature: 0.7
frequency_penalty: 0
presence_penalty: 0
max_tokens: 4096
seed: -1
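# Dynamic temperature and additional samplers: a dynatemp_range of 0 keeps
# the temperature fixed; top_k restricts sampling to the k most likely
# tokens; min_p drops tokens below that fraction of the top token's
# probability; tfs_z and typ_p at 1 leave tail-free and typical sampling
# disabled.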
dynatemp_range: 0
dynatemp_exponent: 1
top_k: 40
min_p: 0.05
tfs_z: 1
typ_p: 1
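# Repetition penalty: consider the last repeat_last_n tokens; a
# repeat_penalty of 1 applies no penalty.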
repeat_last_n: 64
repeat_penalty: 1
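# Mirostat sampling is disabled; tau (target entropy) and eta (learning
# rate) only take effect when mirostat is enabled.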
mirostat: false
mirostat_tau: 5
mirostat_eta: 0.1
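# penalize_nl controls whether newlines are subject to the repeat penalty,
# ignore_eos keeps generating past the end-of-sequence token, n_probs
# returns that many per-token probabilities (0 = none), and min_keep is the
# minimum number of candidate tokens each sampler must retain (0 = no
# minimum).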
penalize_nl: false
ignore_eos: false
n_probs: 0
min_keep: 0
# END OPTIONAL
# END INFERENCE PARAMETERS
# BEGIN MODEL LOAD PARAMETERS
# BEGIN REQUIRED
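# Load the model with the llama-cpp engine, using Aya 23's chat template
# (system, user, and chatbot turns delimited by the turn tokens), a
# 4096-token context window, and 34 layers offloaded to the GPU (ngl).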
engine: llama-cpp
prompt_template: <|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{system_message}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{prompt}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
ctx_len: 4096
ngl: 34
# END REQUIRED
# END MODEL LOAD PARAMETERS