ContextSize as a var
Dockerfile +2 -1

Dockerfile CHANGED
@@ -1,11 +1,12 @@
 FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
 ARG MODEL
 ARG MODEL_NAME
+ARG CONTEXT_SIZE
 RUN mkdir /opt/koboldcpp
 RUN apt update && apt install git build-essential libopenblas-dev wget python3-pip -y
 RUN git clone https://github.com/lostruins/koboldcpp /opt/koboldcpp
 WORKDIR /opt/koboldcpp
 RUN make LLAMA_OPENBLAS=1 LLAMA_CUBLAS=1 LLAMA_PORTABLE=1
 RUN wget -O model.ggml $MODEL
-CMD /bin/python3 ./koboldcpp.py --model model.ggml --usecublas mmq --gpulayers 99 --multiuser --contextsize
+CMD /bin/python3 ./koboldcpp.py --model model.ggml --usecublas mmq --gpulayers 99 --multiuser --contextsize $CONTEXT_SIZE --port 7860 --hordeconfig $MODEL_NAME 1 1
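With CONTEXT_SIZE declared as a build argument, the context size is supplied when the image is built rather than being fixed in the Dockerfile. Below is a minimal local sketch of how the image could be built and run; the MODEL URL, MODEL_NAME, and CONTEXT_SIZE values are placeholders, not values from this commit.

# Build the image, passing the three build args the Dockerfile declares.
# All values here are hypothetical examples.
docker build \
  --build-arg MODEL="https://example.com/path/to/model.gguf" \
  --build-arg MODEL_NAME="my-model" \
  --build-arg CONTEXT_SIZE=4096 \
  -t koboldcpp-space .

# Run with GPU access; the CMD in the Dockerfile serves on port 7860.
docker run --gpus all -p 7860:7860 koboldcpp-space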