# Base image with PyTorch and CUDA for GPU support
FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime
# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    wget \
    && rm -rf /var/lib/apt/lists/*
# Install Python packages: Transformers, the Hugging Face Hub CLI, and Flask
# (torch and torchvision already ship with the base image)
RUN pip install --no-cache-dir \
    transformers \
    "huggingface_hub[cli]" \
    requests \
    Flask \
    Pillow
# Set the Hugging Face cache to a guaranteed-writable directory
ENV TRANSFORMERS_CACHE=/tmp/cache
RUN mkdir -p /tmp/cache
# Download models with the Hugging Face CLI and place them in dedicated directories
RUN mkdir -p /models/sapiens_pose /models/motionbert
# Download the Meta Sapiens Pose model to /models/sapiens_pose
RUN huggingface-cli download facebook/sapiens-pose-1b-torchscript --local-dir /models/sapiens_pose
# Download the MotionBERT model to /models/motionbert
RUN huggingface-cli download walterzhu/MotionBERT --local-dir /models/motionbert
# Copy the inference script (app.py) into the container
COPY app.py /app/app.py
# Expose the port Hugging Face Spaces routes traffic to
EXPOSE 7860
# Run the Flask app
CMD ["python", "/app/app.py"]