Spaces status: Running
FROM python:3.9-slim

# NOTE(security): passing a token as a build ARG and persisting it to ENV bakes
# it into the image layers (visible via `docker history` / `docker inspect`).
# Prefer BuildKit secrets or a runtime-only env var for private model access.
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}

# Model selection; MODEL_PATH = MODEL_DIR + MODEL_FILE.
ENV MODEL_REPO="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
ENV MODEL_FILE="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
ENV MODEL_DIR="/tmp/models"
ENV MODEL_PATH="/tmp/models/${MODEL_FILE}"

# Cache-bust on demand: change BUILD_ID to invalidate layers from this point on.
ARG BUILD_ID=deploy-001
ENV BUILD_ID=${BUILD_ID}

WORKDIR /app

# System deps (build-essential/cmake/python3-dev are needed to compile native
# Python wheels, e.g. llama-cpp style packages from requirements.txt).
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential curl git git-lfs cmake python3-dev wget \
    && rm -rf /var/lib/apt/lists/*

# Python deps first (better layer caching: code changes don't re-run pip).
COPY requirements.txt /app/requirements.txt
RUN pip3 install --no-cache-dir -r /app/requirements.txt huggingface_hub

# App code (TemplateA subtree root)
COPY streamlit_app.py /app/streamlit_app.py
COPY modules/ /app/modules/
# Canonical code (rsynced into TemplateA before subtree push)
COPY model/ /app/model/
COPY utils/ /app/utils/

# (Optional) download model at build time; or leave for runtime
# RUN python3 /app/model/download_model.py

# Streamlit config
EXPOSE 8501
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health

# Writable config dirs for non-root runtimes (e.g. Hugging Face Spaces).
RUN mkdir -p /tmp/.streamlit /.streamlit && chmod -R 777 /.streamlit
ENV STREAMLIT_HOME=/tmp/.streamlit
ENV XDG_CONFIG_HOME=/tmp/.streamlit
# Fixed: Streamlit only honors env vars with the STREAMLIT_ prefix; the
# previous name BROWSER_GATHER_USAGE_STATS was silently ignored.
ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
# Fixed: printf interprets \n portably; `echo "...\n..."` escape handling
# varies between shells (dash builtin vs. POSIX echo vs. bash).
RUN printf '[browser]\ngatherUsageStats = false\n' > /tmp/.streamlit/config.toml

# Path where the downloader stores the model. Duplicates the MODEL_PATH set
# above with the same literal value; kept so a later-stage override is explicit.
ENV MODEL_PATH=/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf

ENTRYPOINT ["streamlit", "run", "streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]