---
repo: TheBloke/Manticore-13B-GGML
file: Manticore-13B.ggmlv2.q5_1.bin
llama_cpp:
  n_ctx: 2048
  n_gpu_layers: 40  # LLaMA 13B has 40 layers, so this offloads the full model to the GPU
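# A minimal sketch (an assumption, not this app's actual loading code) of how
# `repo`, `file`, and the `llama_cpp` options above could be consumed with
# huggingface_hub and a llama-cpp-python release that still reads GGML files:
#
#   from huggingface_hub import hf_hub_download
#   from llama_cpp import Llama
#
#   model_path = hf_hub_download(repo_id="TheBloke/Manticore-13B-GGML",
#                                filename="Manticore-13B.ggmlv2.q5_1.bin")
#   llm = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=40)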
chat:
  stop:
    - "</s>"
    - "<unk>"
    - "### USER:"
    - "USER:"
queue:
  max_size: 16
  concurrency_count: 1  # leave this at 1; llama-cpp-python does not handle concurrent requests, and a concurrent call will crash the entire app
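# If the front end is a Gradio Blocks app (an assumption; the UI framework is
# not named in this file), these queue settings map directly onto Gradio 3.x:
#
#   demo.queue(max_size=16, concurrency_count=1).launch()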