# syntax=docker/dockerfile:1
# Build llama.cpp (OpenBLAS-accelerated) and serve the Obsidian-3B multimodal
# model over the llama.cpp HTTP server on port 7860.
FROM python:3.11.6-bullseye

# Build dependencies. Use apt-get (not apt) for non-interactive scripts
# (DL3027), skip recommends, and clean the package lists in the same layer so
# they never persist in the image (DL3009/DL3015). git and wget are already
# provided by the bullseye base image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        cmake \
        libopenblas-dev \
    && rm -rf /var/lib/apt/lists/*

# Absolute WORKDIR (DL3000) — same location the original relative paths
# resolved to. WORKDIR creates the directory, so no mkdir is needed.
# Shallow clone: full history is not needed for a one-shot build.
# NOTE(review): the clone is unpinned, so builds are not reproducible and may
# break when upstream renames flags (e.g. LLAMA_BLAS -> GGML_BLAS in newer
# revisions) — consider pinning a tag or commit.
WORKDIR /llama.cpp
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp .

# Configure and compile in one logical step / layer.
# NOTE(review): -DLLAMA_NATIVE=ON compiles for the *build* host's CPU; the
# resulting image may crash with SIGILL on older CPUs — confirm this is wanted.
WORKDIR /llama.cpp/build
RUN cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DLLAMA_NATIVE=ON \
    && cmake --build . --config Release

# Fetch model weights next to the server binary (where CMD expects them).
# NOTE(review): downloads are not checksum-verified — consider verifying
# sha256 sums here for supply-chain safety.
WORKDIR /llama.cpp/build/bin
RUN wget -q https://huggingface.co/nisten/obsidian-3b-multimodal-q6-gguf/resolve/main/mmproj-obsidian-f16.gguf -O mmproj-model-f16.gguf \
    && wget -q https://huggingface.co/nisten/obsidian-3b-multimodal-q6-gguf/resolve/main/obsidian-q6.gguf -O ggml-model-f16.gguf

# Documentation only (does not publish): the port the server below binds.
EXPOSE 7860

# Exec-form CMD (preserved verbatim): llama.cpp server, 2048-token context,
# batch size 1024, listening on all interfaces.
CMD ["./server", "-m", "ggml-model-f16.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "2048", "--batch-size", "1024", "--verbose"]