dkdaniz committed on
Commit
227dbd1
1 Parent(s): 0b35f41

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +8 -4
Dockerfile CHANGED
@@ -2,11 +2,15 @@
2
  # Build as `docker build . -t localgpt`, requires BuildKit.
3
  # Run as `docker run -it --mount src="$HOME/.cache",target=/root/.cache,type=bind --gpus=all localgpt`, requires Nvidia container toolkit.
4
 
5
- FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
 
 
6
  RUN apt-get update && apt-get install -y software-properties-common
7
  RUN apt-get install -y g++-11 make python3 python-is-python3 pip
 
8
  # only copy what's needed at every step to optimize layer cache
9
  COPY ./requirements.txt .
 
10
  # use BuildKit cache mount to drastically reduce redownloading from pip on repeated builds
11
  RUN --mount=type=cache,target=/root/.cache CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --timeout 100 -r requirements.txt llama-cpp-python==0.1.83
12
  COPY SOURCE_DOCUMENTS ./SOURCE_DOCUMENTS
@@ -14,8 +18,8 @@ COPY ingest.py constants.py ./
14
  # Docker BuildKit does not support GPU during *docker build* time right now, only during *docker run*.
15
  # See <https://github.com/moby/buildkit/issues/1436>.
16
  # If this changes in the future you can `docker build --build-arg device_type=cuda . -t localgpt` (+GPU argument to be determined).
17
- ARG device_type=cpu
18
- RUN --mount=type=cache,target=/root/.cache python ingest.py --device_type $device_type
19
  COPY . .
20
  ENV device_type=cuda
21
- CMD python run_localGPT.py --device_type $device_type
 
2
  # Build as `docker build . -t localgpt`, requires BuildKit.
3
  # Run as `docker run -it --mount src="$HOME/.cache",target=/root/.cache,type=bind --gpus=all localgpt`, requires Nvidia container toolkit.
4
 
5
+ ARG CUDA_IMAGE="12.1.1-devel-ubuntu22.04"
6
+ FROM nvidia/cuda:${CUDA_IMAGE}
7
+
8
  RUN apt-get update && apt-get install -y software-properties-common
9
  RUN apt-get install -y g++-11 make python3 python-is-python3 pip
10
+
11
  # only copy what's needed at every step to optimize layer cache
12
  COPY ./requirements.txt .
13
+
14
  # use BuildKit cache mount to drastically reduce redownloading from pip on repeated builds
15
  RUN --mount=type=cache,target=/root/.cache CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --timeout 100 -r requirements.txt llama-cpp-python==0.1.83
16
  COPY SOURCE_DOCUMENTS ./SOURCE_DOCUMENTS
 
18
  # Docker BuildKit does not support GPU during *docker build* time right now, only during *docker run*.
19
  # See <https://github.com/moby/buildkit/issues/1436>.
20
  # If this changes in the future you can `docker build --build-arg device_type=cuda . -t localgpt` (+GPU argument to be determined).
21
+ ARG device_type=cuda
22
+ RUN --mount=type=cache,target=/root/.cache python ingest.py --device_type "cuda"
23
  COPY . .
24
  ENV device_type=cuda
25
+ CMD python run_localGPT.py --device_type "cuda"