# Use official slim Python image
FROM python:3.10-slim

# Set environment variables to control cache location
ENV TRANSFORMERS_CACHE=/app/hf_cache \
    HF_HOME=/app/hf_cache \
    XDG_CACHE_HOME=/app/hf_cache \
    TORCH_HOME=/app/hf_cache \
    HF_DATASETS_CACHE=/app/hf_cache \
    SAFE_TENSORS_CACHE=/app/hf_cache

# Set working directory
WORKDIR /app

# Install git for loading models from Hugging Face
RUN apt-get update && apt-get install -y git && rm -rf /var/lib/apt/lists/*

# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Pre-download the model and tokenizer (build-time caching)
RUN python -c "\
from transformers import AutoTokenizer, AutoModelForSequenceClassification; \
model_name = 'tabularisai/multilingual-sentiment-analysis'; \
AutoTokenizer.from_pretrained(model_name); \
AutoModelForSequenceClassification.from_pretrained(model_name)"

# Copy application code
COPY . .

# Expose port for Uvicorn
EXPOSE 7860

# Command to run FastAPI with Uvicorn
CMD ["uvicorn", "sentiment_api:app", "--host", "0.0.0.0", "--port", "7860"]