dkdaniz committed on
Commit
a27c717
1 Parent(s): f627f62

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +8 -7
Dockerfile CHANGED
@@ -17,20 +17,21 @@ RUN python3 -m pip install --upgrade pip pytest cmake \
17
  pydantic-settings starlette-context gradio huggingface_hub hf_transfer
18
 
19
  RUN apt-get update && apt-get install -y software-properties-common
20
- RUN apt-get install -y g++-11 make python3 python-is-python3 pip
21
 
22
  # only copy what's needed at every step to optimize layer cache
23
  COPY ./requirements.txt .
24
 
25
  # use BuildKit cache mount to drastically reduce redownloading from pip on repeated builds
26
- RUN --mount=type=cache,target=/root/.cache CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --timeout 100 -r requirements.txt llama-cpp-python==0.1.83
 
27
  COPY SOURCE_DOCUMENTS ./SOURCE_DOCUMENTS
 
28
  COPY ingest.py constants.py ./
29
- # Docker BuildKit does not support GPU during *docker build* time right now, only during *docker run*.
30
- # See <https://github.com/moby/buildkit/issues/1436>.
31
- # If this changes in the future you can `docker build --build-arg device_type=cuda . -t localgpt` (+GPU argument to be determined).
32
  ARG device_type=cuda
33
- RUN --mount=type=cache,target=/root/.cache python ingest.py --device_type $device_type
 
34
  COPY . .
35
  ENV device_type=cuda
36
- CMD python run_localGPT.py --device_type $device_type
 
17
  pydantic-settings starlette-context gradio huggingface_hub hf_transfer
18
 
19
  RUN apt-get update && apt-get install -y software-properties-common
20
+ RUN apt-get install -y g++-11
21
 
22
  # only copy what's needed at every step to optimize layer cache
23
  COPY ./requirements.txt .
24
 
25
  # use BuildKit cache mount to drastically reduce redownloading from pip on repeated builds
26
+ RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install requirements.txt llama-cpp-python
27
+
28
  COPY SOURCE_DOCUMENTS ./SOURCE_DOCUMENTS
29
+
30
  COPY ingest.py constants.py ./
31
+
 
 
32
  ARG device_type=cuda
33
+ RUN python3 ingest.py --device_type $device_type
34
+
35
  COPY . .
36
  ENV device_type=cuda
37
+ CMD python3 run_localGPT.py --device_type $device_type