---
hub:
  repo_id: Nekochu/Luminia-13B-v3
  filename: Luminia-13B-v3-Q4_K_M.gguf

llama_cpp:
  n_ctx: 4096
  # n_gpu_layers: 40  # Llama 13B has 40 layers; uncomment to offload all of them to the GPU

chat:
  stop:
    - "</s>"  # Llama-2 EOS token
    - "### USER:"
    - "USER:"

queue:
  max_size: 16
  concurrency_count: 1  # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
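
# A minimal sketch of how a loader might consume the hub: and llama_cpp: values
# above, assuming llama-cpp-python and huggingface_hub are installed; this is an
# illustration of the mapping, not this app's actual source:
#
#   from huggingface_hub import hf_hub_download
#   from llama_cpp import Llama
#
#   # Download the GGUF weights named under hub: (cached locally after the first run)
#   model_path = hf_hub_download(
#       repo_id="Nekochu/Luminia-13B-v3",
#       filename="Luminia-13B-v3-Q4_K_M.gguf",
#   )
#
#   # Load the model with the context size from llama_cpp:
#   llm = Llama(model_path=model_path, n_ctx=4096)
#
#   # Generate, cutting the output off at the stop strings from chat:
#   out = llm.create_completion(
#       "### USER: Hello\n### ASSISTANT:",
#       stop=["</s>", "### USER:", "USER:"],
#   )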