#!/bin/bash

export WORK="/home/user/app"
cd "$WORK" || exit 1

echo "Downloading llama.cpp..."

# Fetch a prebuilt llama.cpp release for Ubuntu x64 and unpack it quietly.
wget -q -O llama_cpp.zip "https://github.com/ggml-org/llama.cpp/releases/download/b6102/llama-b6102-bin-ubuntu-x64.zip"
unzip -q llama_cpp.zip
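
# Optional sanity check: the release zip is expected to unpack to build/bin/
# (the same layout the launch command below relies on); bail out early if the
# server binary is missing or not executable.
if [ ! -x ./build/bin/llama-server ]; then
    echo "llama-server binary not found after unzip" >&2
    exit 1
fi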

echo "Downloading model..."

# Pull a quantized Qwen3-4B GGUF from Hugging Face; quote the URL so the
# shell does not treat "?" as a glob character.
wget -q -O model.gguf "https://huggingface.co/lmstudio-community/Qwen3-4B-Thinking-2507-GGUF/resolve/main/Qwen3-4B-Thinking-2507-Q6_K.gguf?download=true"
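
# Guard against a failed or empty download before launching the server.
if [ ! -s model.gguf ]; then
    echo "model download failed" >&2
    exit 1
fi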

echo "Booting up llama server..."

# Run the server in the background so the script can continue; without "&"
# the status message below would never print.
./build/bin/llama-server -m model.gguf --port 8000 --host 0.0.0.0 \
    --threads 2 --ctx-size 4096 --mlock --jinja &
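
# Wait until the server answers on its /health endpoint (served by
# llama-server) before reporting success. The retry count and sleep
# interval here are arbitrary choices, not values from the original script.
for _ in {1..30}; do
    if curl -sf http://localhost:8000/health > /dev/null; then
        break
    fi
    sleep 2
done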

echo "llama server running on port 8000"
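
# Optional smoke test, a minimal sketch: llama-server exposes an
# OpenAI-compatible API, so a plain chat completion request should answer
# once the server is up (no "model" field is needed; the loaded GGUF is used).
curl -s http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"messages": [{"role": "user", "content": "Say hello."}]}'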