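# Two-stage CUDA image: build the llama.cpp server, then ship it with a GGUF model.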
ARG UBUNTU_VERSION=22.04
ARG CUDA_VERSION=12.3.1
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
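# Build stage: compile the llama.cpp server with CUDA support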
FROM ${BASE_CUDA_DEV_CONTAINER} AS build
ARG CUDA_DOCKER_ARCH=all
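# Install build dependencies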
RUN apt-get update && \
    apt-get install -y build-essential git cmake wget
WORKDIR /build
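# Fetch the llama.cpp sources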
RUN git clone https://github.com/ggerganov/llama.cpp.git
WORKDIR /build/llama.cpp
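# Select the CUDA architectures to target ("all" covers every supported GPU) and enable the cuBLAS backend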
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
ENV LLAMA_CUBLAS=1
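# Configure and build the project in Release mode with cuBLAS enabled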
RUN mkdir build && \
    cd build && \
    cmake .. -DLLAMA_CUBLAS=ON && \
    cmake --build . --config Release
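# Download the Saiga2 13B q8_0 GGUF model into the image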
WORKDIR /data
RUN wget https://huggingface.co/IlyaGusev/saiga2_13b_gguf/resolve/main/model-q8_0.gguf -nv -O model.gguf
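# Runtime stage: CUDA runtime only, plus the server binary and the model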
FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime
WORKDIR /app
# Copy the executable from the build stage
COPY --from=build /build/llama.cpp/build/bin/server /app
COPY --from=build /data/model.gguf /data/model.gguf
COPY ./run.sh /app/run.sh
EXPOSE 7860
# Make the script executable
RUN chmod +x run.sh
# Run the startup script
CMD ["./run.sh"]