#!/bin/bash
# The shebang must be the very first bytes of the file, or the kernel
# ignores it and the script runs under the caller's default shell.

# ANSI styling used for all user-facing output.
BOLD='\033[1m'
GREEN='\033[1;32m'
WHITE='\033[1;37m'
RED='\033[0;31m'
NC='\033[0m' # reset

# Check-mark glyph; the \u escape is expanded later when the value is
# used inside a printf format string (bash printf understands \uXXXX).
TICK='\u2713'
|
# Detect the GPU vendor and print the matching kernel driver name on stdout:
# "nvidia", "amdgpu", "radeon" or "i915".
# On an unknown/unsupported GPU the diagnostic goes to stderr and the function
# returns non-zero, so $(get_gpu_driver) callers get an empty string instead
# of capturing the error text as a driver name. (The original echoed the error
# to stdout and called `exit 1`, which only killed the command-substitution
# subshell while the caller happily exported the message.)
get_gpu_driver() {
    # NVIDIA: either lspci sees the device or the nvidia-smi tool responds.
    if lspci | grep -iq nvidia || nvidia-smi >/dev/null 2>&1; then
        echo "nvidia"
        return
    fi

    if lspci | grep -iq amd; then
        # GCN-and-later families use the modern amdgpu driver; anything
        # older falls back to the legacy radeon driver.
        local gcn_and_later=("Radeon HD 7000" "Radeon HD 8000" "Radeon R5" "Radeon R7" "Radeon R9" "Radeon RX")

        # Identify the AMD VGA adapter model string.
        # Declaration split from assignment so the command's status isn't masked.
        local gpu_info
        gpu_info=$(lspci | grep -i 'vga.*amd')

        local model
        for model in "${gcn_and_later[@]}"; do
            if echo "$gpu_info" | grep -iq "$model"; then
                echo "amdgpu"
                return
            fi
        done

        # Older AMD GPU: use the legacy driver.
        echo "radeon"
        return
    fi

    if lspci | grep -iq intel; then
        echo "i915"
        return
    fi

    # Diagnostic to stderr; fail instead of exiting a throwaway subshell.
    echo "Unknown or unsupported GPU driver" >&2
    return 1
}
|
|
|
|
|
# Display a text spinner on stdout while process $1 is alive, then overwrite
# it with a green tick.
# Arguments: $1 - PID of the process to watch.
show_loading() {
    local pid=$1
    local spin='-\|/'
    local i=0

    printf " "

    # kill -0 only tests that the process exists; it sends no signal.
    while kill -0 "$pid" 2>/dev/null; do
        i=$(( (i + 1) % 4 ))
        # Print the spinner char via %s: interpolating it into the format
        # string (as the original did) leaves a dangling "\" escape when
        # the char is the backslash.
        printf '\b%s' "${spin:$i:1}"
        sleep .1
    done

    # Color codes and the \u2713 tick need printf's escape expansion,
    # so they stay in the format string deliberately.
    printf "\b${GREEN}${TICK}${NC}"
}
|
|
|
|
|
# Print command-line help, including the auto-detected GPU driver.
usage() {
    echo "Usage: $0 [OPTIONS]"
    echo "Options:"
    echo "  --enable-gpu[count=COUNT]  Enable GPU support with the specified count."
    echo "  --enable-api[port=PORT]    Enable API and expose it on the specified port."
    echo "  --webui[port=PORT]         Set the port for the web user interface."
    echo "  --data[folder=PATH]        Bind mount for ollama data folder (by default will create the 'ollama' volume)."
    echo "  --build                    Build the docker image before running the compose project."
    echo "  --drop                     Drop the compose project."
    echo "  -q, --quiet                Run script in headless mode."
    echo "  -h, --help                 Show this help message."
    echo ""
    echo "Examples:"
    echo "  $0 --drop"
    echo "  $0 --enable-gpu[count=1]"
    echo "  $0 --enable-gpu[count=all]"
    echo "  $0 --enable-api[port=11435]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data] --build"
    echo ""
    echo "This script configures and runs a docker-compose setup with optional GPU support, API exposure, and web UI configuration."
    # Escaped quotes: the original's bare "lspci" closed and reopened the
    # string, so the intended quotation marks never reached the output.
    echo "About the gpu to use, the script automatically detects it using the \"lspci\" command."
    echo "In this case the gpu detected is: $(get_gpu_driver)"
}
|
|
|
|
|
# --- Default configuration, overridden by the command-line options below ---
gpu_count=1        # --enable-gpu[count=N]; "all" is also accepted
api_port=11435     # --enable-api[port=N]: host port for the ollama API
webui_port=3000    # --webui[port=N]: host port for the web UI
headless=false     # -q/--quiet: skip the interactive confirmation prompt
build_image=false  # --build: rebuild images before "up"
kill_compose=false # --drop: tear the compose project down and exit
|
|
|
# Pull the VALUE out of an "--option[key=VALUE]" argument.
# Prints the value, or an empty string when no [key=value] part is present.
extract_value() {
    if [[ $1 =~ \[.*=(.*)\] ]]; then
        echo "${BASH_REMATCH[1]}"
    else
        echo ""
    fi
}
|
|
|
|
|
# Walk the argument list, mapping each recognised option onto its
# configuration variable. Options carrying a value use the
# "--name[key=value]" form handled by extract_value; a missing value
# falls back to the documented default.
while [[ $# -gt 0 ]]; do
    case "$1" in
        --enable-gpu*)
            enable_gpu=true
            gpu_count=$(extract_value "$1")
            gpu_count=${gpu_count:-1}
            ;;
        --enable-api*)
            enable_api=true
            api_port=$(extract_value "$1")
            api_port=${api_port:-11435}
            ;;
        --webui*)
            webui_port=$(extract_value "$1")
            webui_port=${webui_port:-3000}
            ;;
        --data*)
            data_dir=$(extract_value "$1")
            data_dir=${data_dir:-"./ollama-data"}
            ;;
        --drop) kill_compose=true ;;
        --build) build_image=true ;;
        -q|--quiet) headless=true ;;
        -h|--help)
            usage
            exit
            ;;
        *)
            echo "Unknown option: $1"
            usage
            exit 1
            ;;
    esac
    shift
done
|
|
|
# --drop tears everything down and leaves immediately; otherwise assemble
# the docker compose command line from the selected options.
if [[ $kill_compose == true ]]; then
    docker compose down --remove-orphans
    echo -e "${GREEN}${BOLD}Compose project dropped successfully.${NC}"
    exit
fi

DEFAULT_COMPOSE_COMMAND="docker compose -f docker-compose.yaml"

if [[ $enable_gpu == true ]]; then
    if [[ -n $gpu_count ]]; then
        # Only plain integers or the literal "all" are valid counts.
        if ! [[ $gpu_count =~ ^([0-9]+|all)$ ]]; then
            echo "Invalid GPU count: $gpu_count"
            exit 1
        fi
        echo "Enabling GPU with $gpu_count GPUs"
        # Driver name detected from the hardware, consumed by the gpu overlay.
        export OLLAMA_GPU_DRIVER=$(get_gpu_driver)
        export OLLAMA_GPU_COUNT=$gpu_count
    fi
    DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.gpu.yaml"
fi

if [[ $enable_api == true ]]; then
    DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.api.yaml"
    if [[ -n $api_port ]]; then
        export OLLAMA_WEBAPI_PORT=$api_port
    fi
fi

if [[ -n $data_dir ]]; then
    DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.data.yaml"
    export OLLAMA_DATA_DIR=$data_dir
fi

if [[ -n $webui_port ]]; then
    export OPEN_WEBUI_PORT=$webui_port
fi

# Single append; the resulting string is identical to three separate ones.
DEFAULT_COMPOSE_COMMAND+=" up -d --remove-orphans --force-recreate"
if [[ $build_image == true ]]; then
    DEFAULT_COMPOSE_COMMAND+=" --build"
fi
|
|
|
|
|
# --- Summary, confirmation prompt, and launch ---

echo
echo -e "${WHITE}${BOLD}Current Setup:${NC}"
echo -e " ${GREEN}${BOLD}GPU Driver:${NC} ${OLLAMA_GPU_DRIVER:-Not Enabled}"
echo -e " ${GREEN}${BOLD}GPU Count:${NC} ${OLLAMA_GPU_COUNT:-Not Enabled}"
echo -e " ${GREEN}${BOLD}WebAPI Port:${NC} ${OLLAMA_WEBAPI_PORT:-Not Enabled}"
echo -e " ${GREEN}${BOLD}Data Folder:${NC} ${data_dir:-Using ollama volume}"
echo -e " ${GREEN}${BOLD}WebUI Port:${NC} $webui_port"
echo

if [[ $headless == true ]]; then
    echo -ne "${WHITE}${BOLD}Running in headless mode... ${NC}"
    choice="y"
else
    # Single silent keypress; plain Enter yields an empty $choice.
    echo -ne "${WHITE}${BOLD}Do you want to proceed with current setup? (Y/n): ${NC}"
    read -n1 -s choice
fi

echo

# Accept Enter, "y" or "Y": the prompt advertises uppercase Y as the
# default, but the original only matched "" and lowercase "y".
if [[ $choice == "" || $choice == "y" || $choice == "Y" ]]; then
    # Run compose in the background so its PID is available (e.g. for
    # show_loading) while we wait on it.
    eval "$DEFAULT_COMPOSE_COMMAND" &
    PID=$!

    wait "$PID"
    # Capture compose's real exit status immediately: the echo below
    # clobbered $? in the original, making the error branch unreachable.
    compose_status=$?

    echo

    if [ $compose_status -eq 0 ]; then
        echo -e "${GREEN}${BOLD}Compose project started successfully.${NC}"
    else
        echo -e "${RED}${BOLD}There was an error starting the compose project.${NC}"
    fi
else
    echo "Aborted."
fi

echo