ethnmcl committed
Commit bacc9d2 · verified · 1 Parent(s): 93a3159

Update Dockerfile

Files changed (1)
  1. Dockerfile (+10 -9)
Dockerfile CHANGED
@@ -1,27 +1,28 @@
-# Dockerfile
 FROM python:3.11-slim
 
-# Optional: speed up HF downloads & cache in container
-ENV HF_HOME=/data/huggingface \
+# --- Hugging Face caches to a writable path inside the container ---
+ENV HF_HOME=/app/hf_cache \
+    TRANSFORMERS_CACHE=/app/hf_cache \
+    HF_DATASETS_CACHE=/app/hf_cache \
     HF_HUB_ENABLE_HF_TRANSFER=1 \
     PIP_NO_CACHE_DIR=1 \
     PYTHONUNBUFFERED=1
 
-# -------- MODEL CONFIG --------
-# Build-time overrideable default
+# Model id can be overridden at build or runtime
 ARG MODEL_ID=ethnmcl/checkin-lora-gpt2
-# Runtime ENV (can still be overridden by deploy env)
 ENV MODEL_ID=${MODEL_ID}
 
-# Optional: if private model, pass HF_TOKEN at runtime only (not here)
-# ENV HF_TOKEN=...
-
 WORKDIR /app
 COPY requirements.txt /app/requirements.txt
+
 RUN pip install --upgrade pip && pip install -r requirements.txt
 
+# Ensure the cache directory exists and is writable (adjust ownership for your base image/user policy)
+RUN mkdir -p /app/hf_cache && chmod -R 777 /app/hf_cache
+
 COPY . /app
 
 EXPOSE 7860
 CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
+
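Because MODEL_ID is declared both as a build argument and as an environment variable, the same image can be pointed at a different model without editing the Dockerfile. A minimal build-and-run sketch, assuming a placeholder image tag checkin-api (not part of this commit); pass HF_TOKEN at run time only if the model is private, as the removed comment advised:

    # Bake the default MODEL_ID into the image
    docker build -t checkin-api .

    # Or override the default at build time
    docker build --build-arg MODEL_ID=ethnmcl/checkin-lora-gpt2 -t checkin-api .

    # Override again at run time; add HF_TOKEN only for private models
    docker run -p 7860:7860 -e MODEL_ID=ethnmcl/checkin-lora-gpt2 -e HF_TOKEN="$HF_TOKEN" checkin-api

The container serves on port 7860, matching the EXPOSE and uvicorn settings above.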