Spaces:
Paused
Paused
Upload Dockerfile
Browse files- Dockerfile +4 -0
Dockerfile
CHANGED
@@ -15,6 +15,10 @@ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
|
|
15 |
# Point the transformers library's model-download cache at a path inside the image.
# NOTE(review): TRANSFORMERS_CACHE is deprecated in recent transformers releases in
# favor of HF_HOME — confirm the installed library version still honors it.
ENV TRANSFORMERS_CACHE=/code/cache/huggingface/transformers
|
16 |
# Root directory for all Hugging Face caches/config (hub downloads, tokens, etc.).
ENV HF_HOME=/code/cache/huggingface
|
17 |
|
|
|
|
|
|
|
|
|
18 |
# Download the llama-2-7b-chat.ggmlv3.q8_0.bin model weights into the image at build time.
# Must use the /resolve/ endpoint: huggingface.co/.../blob/... returns the HTML
# file-viewer page, not the raw binary, so the saved "model" would be unusable.
# -O writes the payload to the exact path the app expects.
RUN wget -O /code/llama-2-7b-chat.ggmlv3.q8_0.bin https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin
|
|
|
15 |
# Point the transformers library's model-download cache at a path inside the image.
# NOTE(review): TRANSFORMERS_CACHE is deprecated in recent transformers releases in
# favor of HF_HOME — confirm the installed library version still honors it.
ENV TRANSFORMERS_CACHE=/code/cache/huggingface/transformers
|
16 |
# Root directory for all Hugging Face caches/config (hub downloads, tokens, etc.).
ENV HF_HOME=/code/cache/huggingface
|
17 |
|
18 |
+
# Pre-create the Hugging Face cache tree and open it up so the runtime user can
# write to it (Hugging Face Spaces containers run under an arbitrary non-root UID).
# NOTE(review): chmod -R 777 is broader than necessary — a targeted chown to a
# dedicated user, or chmod -R a+rwX, would grant the same access with less risk.
RUN mkdir -p /code/cache/huggingface/transformers \
    && chmod -R 777 /code/cache/huggingface
|
21 |
+
|
22 |
# Download the llama-2-7b-chat.ggmlv3.q8_0.bin model weights into the image at build time.
# Must use the /resolve/ endpoint: huggingface.co/.../blob/... returns the HTML
# file-viewer page, not the raw binary, so the saved "model" would be unusable.
# -O writes the payload to the exact path the app expects.
RUN wget -O /code/llama-2-7b-chat.ggmlv3.q8_0.bin https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin
|