spuun committed on
Commit
8276f8e
·
1 Parent(s): 5711d0b

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +1 -2
Dockerfile CHANGED
@@ -4,7 +4,6 @@ RUN apt update && apt install -y cmake
4
  RUN git clone https://github.com/ggerganov/llama.cpp
5
  WORKDIR llama.cpp
6
 
7
- RUN git checkout 438c2ca83045a00ef244093d27e9ed41a8cb4ea9
8
  RUN mkdir build
9
  WORKDIR build
10
  RUN cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DLLAMA_NATIVE=ON
@@ -14,4 +13,4 @@ WORKDIR bin
14
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
15
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-f16.gguf
16
 
17
- CMD ["./server", "-m", "ggml-model-f16.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "2048", "--verbose"]
 
4
  RUN git clone https://github.com/ggerganov/llama.cpp
5
  WORKDIR llama.cpp
6
 
 
7
  RUN mkdir build
8
  WORKDIR build
9
  RUN cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DLLAMA_NATIVE=ON
 
13
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf
14
  RUN wget https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-f16.gguf
15
 
16
+ CMD ["./server", "-m", "ggml-model-f16.gguf", "--mmproj", "mmproj-model-f16.gguf", "--host", "0.0.0.0", "--port", "7860", "-c", "2048", "--batch-size", "1024", "--verbose"]