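# syntax=docker/dockerfile:1
# (parser directive; enables the COPY heredoc used for the download script below)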
# Base image with GPU support and TensorFlow pre-installed
FROM tensorflow/tensorflow:2.15.0-gpu
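# Note: the tensorflow/tensorflow *-gpu images ship the CUDA/cuDNN libraries that
# TF 2.15 expects; the torch wheel installed below bundles its own CUDA runtime.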
# Install system dependencies
RUN apt-get update && apt-get install -y \
git \
wget \
&& rm -rf /var/lib/apt/lists/*
# Install system-level dependencies for OpenCV
RUN apt-get update && apt-get install -y \
libglib2.0-0 \
libsm6 \
libxext6 \
libxrender-dev \
libgl1-mesa-glx \
&& rm -rf /var/lib/apt/lists/*
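# (libGL and the X11/glib libraries above are runtime requirements of
# opencv-python; `import cv2` fails without them on minimal base images)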
# Install Python packages (--ignore-installed sidesteps the distutils-installed
# blinker package in the base image, which pip cannot cleanly uninstall)
RUN pip install --no-cache-dir --ignore-installed \
torch \
torchvision \
transformers \
requests \
Flask \
Pillow \
huggingface_hub \
tensorflow==2.15.0 \
tensorflow_hub \
opencv-python
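# Only tensorflow is pinned above; the other packages float, so rebuilds may pull
# newer, potentially incompatible releases. Pinning versions (e.g. a hypothetical
# transformers==4.38.2) would make builds reproducible.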
# Set Hugging Face cache to a guaranteed writable directory
ENV TRANSFORMERS_CACHE=/app/cache
ENV HF_HOME=/app/cache
RUN mkdir -p /app/cache && chmod -R 777 /app/cache
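# Recent transformers releases read HF_HOME and treat TRANSFORMERS_CACHE as
# deprecated; both are set here so either code path resolves to /app/cache.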
# Set up CUDA environment variables
ENV CUDA_VISIBLE_DEVICES=0
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
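# NVIDIA_VISIBLE_DEVICES / NVIDIA_DRIVER_CAPABILITIES are read by the NVIDIA
# container runtime to decide which GPUs and driver libraries to mount;
# CUDA_VISIBLE_DEVICES then restricts the app itself to GPU 0.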
# Create directories for the models
RUN mkdir -p /models/blip /models/clip
# Python script to download models via tensorflow_hub and transformers
# (written with a COPY heredoc so the inline '#' comments survive the
# Dockerfile parser, which strips comment-looking continuation lines)
COPY <<'EOF' /download_models.py
import tensorflow_hub as hub

# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')

# Download BLIP model and processor and save them to /models/blip
from transformers import BlipForConditionalGeneration, BlipProcessor
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')

# Download CLIP model and processor and save them to /models/clip
from transformers import CLIPModel, CLIPProcessor
CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')
CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')
EOF
# Run the script to download models at build time
RUN python /download_models.py
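# Downloading at build time bakes the weights into the image: a larger image,
# but no network fetch (and no cold-start download) when the Space wakes up.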
# Copy the inference script (app.py) into the container
COPY app.py /app/app.py
# Expose port 7860, the default app port for Hugging Face Docker Spaces
# (the Flask app must bind to it; Flask's own default would be 5000)
EXPOSE 7860
# Run the Flask app
CMD ["python", "/app/app.py"] |