GPU Edition
Dockerfile +4 -4
Dockerfile CHANGED
@@ -1,9 +1,9 @@
-FROM
+FROM nvidia/cuda:11.0.3-devel-ubi8
 RUN mkdir /opt/koboldcpp
 RUN apt update && apt install git build-essential libopenblas-dev wget python3-pip -y
 RUN git clone https://github.com/lostruins/koboldcpp /opt/koboldcpp
 WORKDIR /opt/koboldcpp
-RUN make LLAMA_OPENBLAS=1
-RUN wget -O model.ggml
-CMD ["/bin/python3", "./koboldcpp.py", "--model", "model.ggml", "--port", "7860", "--hordeconfig", "HF_SPACE_Tiefighter", "1", "1"]
+RUN make LLAMA_OPENBLAS=1 LLAMA_CUBLAS=1
+RUN wget -O model.ggml $MODEL
+CMD ["/bin/python3", "./koboldcpp.py", "--model", "model.ggml", "--usecublas", "mmq", "--multiuser", "--contextsize", "4096", "--port", "7860", "--hordeconfig", "HF_SPACE_Tiefighter", "1", "1"]
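For reference, a minimal sketch of building and running this GPU image outside of Spaces (not part of the commit). It assumes the model URL is wired up as a MODEL build argument, which would need an ARG MODEL line in the Dockerfile or equivalent build-time configuration; the URL and image tag below are placeholders. GPU access at runtime requires the NVIDIA Container Toolkit on the host.

# Hypothetical local build: MODEL is assumed to be exposed as a build
# argument (e.g. via ARG MODEL); the model URL is only a placeholder.
docker build --build-arg MODEL="https://example.com/tiefighter.ggml" -t koboldcpp-gpu .

# Run with GPU access and expose the port set in the CMD above (7860).
docker run --gpus all -p 7860:7860 koboldcpp-gpu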