FROM runpod/pytorch:2.1.0-py3.10-cuda11.8.0-devel-ubuntu22.04

# Install Ollama
RUN curl -fsSL https://ollama.com/install.sh | sh

# Set Ollama to listen on all interfaces
ENV OLLAMA_HOST=0.0.0.0

# Expose Ollama port
EXPOSE 11434

# Create a script to start Ollama and pull models
RUN echo '#!/bin/bash\n\
ollama serve &\n\
sleep 5\n\
ollama pull $MODEL\n\
ollama pull $MODEL2\n\
tail -f /dev/null\n\
' > /start.sh && chmod +x /start.sh

# Set the entrypoint to our start script
ENTRYPOINT ["/start.sh"]
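
# Example build and run, as a sketch only: the image tag "ollama-runpod" and the
# model names "llama3" and "mistral" are placeholders, not part of this Dockerfile.
# MODEL and MODEL2 must be supplied at runtime (e.g. via -e flags or the RunPod
# template's environment variable settings) for the pull steps in /start.sh to work.
#
#   docker build -t ollama-runpod .
#   docker run --gpus all -p 11434:11434 \
#     -e MODEL=llama3 -e MODEL2=mistral \
#     ollama-runpod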