# Lightweight Python base image, suitable for Hugging Face Spaces
FROM python:3.8-slim
# Install system dependencies, including the shared libraries OpenCV needs
# (opencv-python links against libGL and friends at import time)
RUN apt-get update && apt-get install -y \
    git \
    wget \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*
# Install Python packages: PyTorch, Hugging Face Transformers, Flask, TensorFlow, TensorFlow Hub, and OpenCV
RUN pip install --no-cache-dir \
torch \
torchvision \
transformers \
requests \
Flask \
Pillow \
huggingface_hub \
tensorflow \
tensorflow_hub \
opencv-python
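# Note: package versions are left unpinned above, so each build pulls the latest
# releases; pinning them (e.g. transformers==<version>, with a version you have
# tested) would make builds reproducible.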
# Set Hugging Face cache to a guaranteed writable directory
ENV TRANSFORMERS_CACHE=/tmp/cache
RUN mkdir -p /tmp/cache && chmod -R 777 /tmp/cache
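# Optional: tensorflow_hub maintains its own cache (default: /tmp/tfhub_modules).
# It can be redirected to the same writable location if desired; left commented
# out here as a suggestion, not something this build requires:
# ENV TFHUB_CACHE_DIR=/tmp/cache/tfhub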
# Create directories for the models
RUN mkdir -p /models/blip /models/clip
# Write a Python script that downloads the models via tensorflow_hub and
# huggingface_hub. printf emits one line per argument, which avoids both echo's
# shell-dependent escape handling and Docker stripping `#`-prefixed lines
# inside a continuation.
RUN printf '%s\n' \
    "import tensorflow_hub as hub" \
    "" \
    "# Download MoveNet from TensorFlow Hub (cached in the image; the app loads it directly)" \
    "hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')" \
    "" \
    "# Download the BLIP model and processor and save them locally" \
    "from transformers import BlipForConditionalGeneration, BlipProcessor" \
    "BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')" \
    "BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')" \
    "" \
    "# Download the CLIP model and processor and save them locally" \
    "from transformers import CLIPModel, CLIPProcessor" \
    "CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" \
    "CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" \
    > download_models.py
# Run the script to download models
RUN python download_models.py
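# The models saved under /models can now be loaded without network access.
# A minimal sketch of the loading side (the actual app.py code is not shown here):
#   from transformers import BlipForConditionalGeneration, BlipProcessor
#   model = BlipForConditionalGeneration.from_pretrained('/models/blip')
#   processor = BlipProcessor.from_pretrained('/models/blip')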
# Copy the inference script (app.py) into the container
COPY app.py /app/app.py
# Expose the default port for Flask
EXPOSE 7860
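# Hugging Face Spaces routes traffic to port 7860 by default, so app.py is
# expected to bind to it; a sketch, assuming app.py defines a Flask app named `app`:
#   app.run(host='0.0.0.0', port=7860)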
# Run the Flask app
CMD ["python", "/app/app.py"] |