makkzone's picture
Create start.sh
4d55dfd verified
raw
history blame
567 Bytes
#!/bin/bash
# start.sh — container entrypoint: launch the Ollama server in the
# background, wait for its API to come up, pull the model, then hand
# off to Streamlit as the foreground process.
set -euo pipefail

readonly OLLAMA_URL="http://localhost:11434"
readonly MODEL="llama3.2"

# Start Ollama in the background; keep its PID so we can clean up on failure.
ollama serve &
ollama_pid=$!

# Poll the API root until it answers, giving up after 30 seconds.
echo "Waiting for Ollama to start..."
if timeout 30s bash -c "until curl -s ${OLLAMA_URL} > /dev/null; do sleep 1; done"; then
  echo "Ollama is running."
else
  echo "Failed to start Ollama within 30 seconds." >&2
  # Don't leave an orphaned server behind; it may be half-started.
  kill "$ollama_pid" 2>/dev/null || true
  exit 1
fi

# Pull the model; under set -e a failed pull aborts before Streamlit starts.
echo "Pulling ${MODEL} model..."
ollama pull "$MODEL"

# exec replaces the shell so Streamlit receives container signals directly.
echo "Starting Streamlit..."
exec streamlit run app.py --server.port 8501 --server.address 0.0.0.0