|
|
|
# GPU-enabled TensorFlow base image, pinned to the exact minor version the
# pip layer below also pins (tensorflow==2.15.0). For fully reproducible
# builds, consider additionally pinning by digest (@sha256:...).
FROM tensorflow/tensorflow:2.15.0-gpu
|
|
|
|
|
# OS packages in a single cached layer: git/wget for fetching resources, plus
# the shared libraries opencv-python needs at runtime (libGL, glib, X11 bits).
# --no-install-recommends keeps the image lean (hadolint DL3015); libxrender1
# replaces libxrender-dev — nothing is compiled here, so the runtime library
# is sufficient. The apt list cache is removed in the same layer so it never
# persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    libgl1-mesa-glx \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender1 \
    wget \
    && rm -rf /var/lib/apt/lists/*
|
|
|
|
|
# Python dependencies in one cached layer, sorted for diffability.
# --no-cache-dir keeps pip's wheel cache out of the image (DL3042);
# --ignore-installed forces reinstalling packages the base image shipped as
# distutils installs, which pip otherwise refuses to modify. tensorflow is
# re-pinned to the base image's own version so a transitive dependency cannot
# silently bump it.
# NOTE(review): the remaining packages are unpinned, so builds are not
# reproducible — consider pinning exact versions via a requirements.txt.
RUN pip install --no-cache-dir --ignore-installed \
    Flask \
    Pillow \
    huggingface_hub \
    opencv-python \
    requests \
    tensorflow==2.15.0 \
    tensorflow_hub \
    torch \
    torchvision \
    transformers
|
|
|
|
|
# Hugging Face cache location. TRANSFORMERS_CACHE is the legacy variable and
# HF_HOME the current one — both are set so older and newer library versions
# agree on the same path.
ENV TRANSFORMERS_CACHE=/app/cache

ENV HF_HOME=/app/cache

# World-writable so the cache works when the platform runs the container as
# an arbitrary non-root UID (e.g. Hugging Face Spaces — presumably the
# target, given EXPOSE 7860; confirm with the deployment config).
# NOTE(review): 777 is broader than least-privilege; a dedicated user plus
# targeted chown would be tighter if the runtime UID is known.
RUN mkdir -p /app/cache && chmod -R 777 /app/cache
|
|
|
|
|
# GPU runtime configuration. CUDA_VISIBLE_DEVICES=0 restricts frameworks in
# the container to the first GPU; the NVIDIA_* variables tell the NVIDIA
# container runtime which devices and driver capabilities to expose.
ENV CUDA_VISIBLE_DEVICES=0

ENV NVIDIA_VISIBLE_DEVICES=all

ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
|
|
|
|
# Fixed locations where the model-download step stores BLIP and CLIP weights.
RUN mkdir -p /models/blip /models/clip
|
|
|
|
|
# Generate the model-download script. printf '%s\n' writes each argument on
# its own line — unlike the previous echo-with-\n approach, this does not
# depend on the shell's echo interpreting backslash escapes (dash's builtin
# does, bash's does not without -e), so it is portable across base images.
RUN printf '%s\n' \
    "import tensorflow_hub as hub" \
    "from transformers import BlipForConditionalGeneration, BlipProcessor" \
    "from transformers import CLIPModel, CLIPProcessor" \
    "" \
    "# MoveNet is fetched from TF Hub so the download happens at build time;" \
    "# it is loaded directly in the app, not saved to /models." \
    "movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')" \
    "" \
    "# BLIP captioning model and processor, saved to a fixed path for the app." \
    "BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')" \
    "BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base').save_pretrained('/models/blip')" \
    "" \
    "# CLIP model and processor, saved to a fixed path for the app." \
    "CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')" \
    "CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')" \
    > download_models.py
|
|
|
|
|
# Fetch all model weights at build time so they are baked into image layers
# and container start does not need to re-download them. NOTE(review): the
# TF Hub model lands in the default TF Hub cache (/tmp/tfhub_modules unless
# TFHUB_CACHE is set) — confirm the app's runtime hub.load() hits that cache.
RUN python download_models.py
|
|
|
|
|
# Application code is copied last so source edits do not invalidate the
# heavy dependency and model layers above.
COPY app.py /app/app.py

# Documentation only — the port must still be published at run time.
# 7860 is the port Hugging Face Spaces expects; presumably this image targets
# Spaces — confirm with the deployment config.
EXPOSE 7860

# Exec-form CMD: python runs as PID 1 and receives SIGTERM directly on stop.
# NOTE(review): there is no USER directive, so the container runs as root
# unless the hosting platform overrides the user.
CMD ["python", "/app/app.py"]