Rahmans_LLM-Chat / entrypoint.sh
#!/bin/bash
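# entrypoint.sh: start an Ollama server, then pull the models listed in the
# comma-separated $model environment variable (e.g. model="llama3,mistral" is
# an example value only; the real list is supplied by the container environment).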
# Source the virtual environment
source /app/venv/bin/activate
# Start the Ollama server in the background and give it a moment to start listening
echo "Starting Ollama server"
ollama serve &
sleep 1
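# A fixed one-second sleep may not be enough on a cold start. As an optional
# alternative (not in the original script), the loop below polls the server
# until `ollama list` succeeds, which only happens once the API is reachable:
# until ollama list >/dev/null 2>&1; do
#   sleep 1
# done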
# $model is expected to be a comma-separated list of model names; split it and pull each one
IFS=',' read -ra MODELS <<< "$model"
for m in "${MODELS[@]}"; do
  echo "Pulling $m"
  ollama pull "$m"
  sleep 5
done
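# Optional sanity check (not part of the original script): show which models
# are now available locally.
# ollama list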
# Keep the script running so the container does not exit while the Ollama server is up
wait