# ollama_mcp_gradio / demo.launcher
# Uploaded by csepartha (commit ef6a41d, verified; 540 bytes).
# NOTE(review): these lines were Hugging Face page chrome captured during
# extraction; kept as comments so the script below remains runnable.
#!/bin/bash
# Launcher: start the Ollama API server, wait until it is responsive,
# pull the model, then start the MCP server and (foreground) the Gradio
# client on 0.0.0.0:7860.
set -euo pipefail

echo "Starting Ollama API server..."
ollama serve &
ollama_pid=$!

# Poll the API (up to 20 tries, ~40s total) instead of assuming it is up.
ready=0
for _ in {1..20}; do
  sleep 2
  # 'ollama list' succeeds only once the API answers; discard its output
  # so the probe does not spam the launch log.
  if ollama list >/dev/null 2>&1; then
    echo "Ollama server is responsive!"
    ready=1
    break
  fi
  echo "Waiting for Ollama server to become available..."
done

# Original script fell through silently after 20 failures and tried to
# pull against a dead server; abort explicitly instead.
if [[ "$ready" -ne 1 ]]; then
  echo "Ollama server did not become responsive; aborting." >&2
  kill "$ollama_pid" 2>/dev/null || true
  exit 1
fi

echo "Pulling Ollama model (granite3.1-moe)..."
ollama pull granite3.1-moe

echo "Starting MCP server..."
python3 server.py &
# Give the MCP server a moment to bind before the client connects.
# TODO(review): replace with a real readiness probe if server.py exposes one.
sleep 5

echo "Starting Gradio client..."
python3 client.py --server_name 0.0.0.0 --server_port 7860