#!/bin/bash

# 1. 检查系统是否是 Ubuntu 22.04
# echo "检查系统版本..."
# if [ -f /etc/os-release ]; then
#     . /etc/os-release
#     if [ "$VERSION_ID" != "22.04" ]; then
#         echo "错误：本脚本仅支持Ubuntu 22.04。"
#         exit 1
#     fi
# else
#     echo "无法确定系统版本。"
#     exit 1
# fi

# 2. Verify that CUDA is installed; the minimum-version comparison happens below.
echo "检查CUDA版本..."
cuda_version="$(nvidia-smi 2>/dev/null | grep -oP 'CUDA Version: \K[0-9.]+')"
# nvidia-smi missing or no "CUDA Version:" field in its header -> abort.
[[ -n "$cuda_version" ]] || {
    echo "错误：未检测到CUDA安装。"
    exit 1
}

# Compare two dotted numeric version strings.
# Despite the name, returns 0 (success) when $1 >= $2 and 1 otherwise,
# which is the contract the CUDA check below relies on ("must be >= 12.4").
# Missing components are treated as 0, so "12" == "12.0".
function version_gt() {
    local ver1=$1
    local ver2=$2
    local IFS=.
    local i ver1_arr=($ver1) ver2_arr=($ver2)
    # Pad ver1 with zeros so both arrays have the same length.
    for ((i=${#ver1_arr[@]}; i<${#ver2_arr[@]}; i++)); do
        ver1_arr[i]=0
    done
    for ((i=0; i<${#ver1_arr[@]}; i++)); do
        if [[ -z ${ver2_arr[i]} ]]; then
            ver2_arr[i]=0
        fi
        # 10# forces base-10 so components like "08" are not parsed as octal.
        if ((10#${ver1_arr[i]} > 10#${ver2_arr[i]})); then
            return 0
        elif ((10#${ver1_arr[i]} < 10#${ver2_arr[i]})); then
            # An earlier, more significant component already decides the
            # ordering: ver1 < ver2. (The original code fell through here,
            # wrongly letting later components override, e.g. 12.5 vs 13.4.)
            return 1
        fi
    done
    # All components equal: the minimum-version requirement is satisfied.
    return 0
}

# Enforce the minimum supported CUDA version.
version_gt "$cuda_version" "12.4" || {
    echo "错误：CUDA版本必须大于等于12.4。当前版本为: $cuda_version"
    exit 1
}

# 3. Docker must be installed and usable by the current user.
echo "检查Docker..."
command -v docker &> /dev/null || {
    echo "错误：未检测到Docker安装。请先安装Docker。"
    exit 1
}

# A failing 'docker ps' usually means the daemon is stopped or the
# current user is not in the docker group.
echo "正在检查Docker权限..."
docker ps > /dev/null 2>&1 || {
    echo "错误：当前用户没有足够的Docker权限。请确保你属于docker组，并且Docker服务正在运行。"
    exit 1
}

# 4. Detect total memory of the first GPU in MB.
# head -n1 guards against multi-GPU machines: --query-gpu prints one line
# per GPU, and multiple lines would break the arithmetic expansion below.
gpu_memory_mb=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits | head -n1 | awk '{print $1}')
# Validate before doing arithmetic so a query failure gives a clear error.
if ! [[ "$gpu_memory_mb" =~ ^[0-9]+$ ]]; then
    echo "错误：无法获取GPU显存大小。"
    exit 1
fi
gpu_memory_gb=$((gpu_memory_mb / 1024))
echo "检测到的GPU显存为: ${gpu_memory_gb} GB"

# 5. Ask for the host path to mount into the container (default: cwd).
read -p "请输入Docker挂载的本地路径（默认为当前目录）: " mount_path
mount_path=${mount_path:-$(pwd)}

# Create the directory if needed; abort on failure, otherwise 'docker run'
# below would either fail or create it owned by root.
if [ ! -d "$mount_path" ]; then
    if ! mkdir -p "$mount_path"; then
        echo "错误：无法创建目录 $mount_path"
        exit 1
    fi
fi

echo "使用挂载路径: $mount_path"

# Only adjust terminal modes when stdin is a real terminal; 'stty' fails
# noisily (and pointlessly) in non-interactive runs.
if [ -t 0 ]; then
    stty -icanon min 1
fi

# Ask for the host port to publish; fall back to 3000 on empty input.
read -p "请输入您要绑定的端口号（默认为3000）：" user_port
user_port=${user_port:-3000}

# Reject anything that is not an integer in [1, 65535].
if ! [[ "$user_port" =~ ^[0-9]+$ ]] || [ "$user_port" -lt 1 ] || [ "$user_port" -gt 65535 ]; then
    echo "错误：端口号无效，请输入一个介于1到65535之间的整数。"
    exit 1
fi

# 6. 运行Docker容器
docker run -d \
    -p ${user_port}:8080 \
    --gpus=all \
    -v "$mount_path/ollama:/root/.ollama" \
    -v "$mount_path/open-webui:/app/backend/data" \
    --name open-webui \
    --restart always \
    ghcr.nju.edu.cn/open-webui/open-webui:ollama

# Pick a default model size from the detected GPU memory:
# >=18GB -> 32B, >=12GB -> 14B, >=6GB -> 8B, otherwise the 1.5B model.
default_choice=1
if [ "$gpu_memory_gb" -ge 18 ]; then
    default_choice=5
elif [ "$gpu_memory_gb" -ge 12 ]; then
    default_choice=4
elif [ "$gpu_memory_gb" -ge 6 ]; then
    default_choice=3
fi

# Only adjust terminal modes on a real terminal; 'stty' errors otherwise.
if [ -t 0 ]; then
    stty -icanon min 1
fi

# Let the user pick a model; empty input falls back to the size-based default.
echo "请选择模型（输入数字）："
echo "1) DeepSeek-R1-1.5B (4-bit)"
echo "2) DeepSeek-R1-7B (4-bit)"
echo "3) DeepSeek-R1-8B (4-bit)"
echo "4) DeepSeek-R1-14B (4-bit)"
echo "5) DeepSeek-R1-32B (4-bit)"
read -p "[默认：$default_choice] 你的选择（1-5）： " choice
choice=${choice:-$default_choice}

# Map the menu choice to a ModelScope GGUF source and a short local name.
case $choice in
    1)
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Qwen-1.5B-GGUF:DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-1.5B"
        ;;
    2)
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Qwen-7B-GGUF:DeepSeek-R1-Distill-Qwen-7B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-7B"
        ;;
    3)
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF:DeepSeek-R1-Distill-Llama-8B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-8B"
        ;;
    4)
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Qwen-14B-GGUF:DeepSeek-R1-Distill-Qwen-14B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-14B"
        ;;
    5)
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Qwen-32B-GGUF:DeepSeek-R1-Distill-Qwen-32B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-32B"
        ;;
    *)
        # Previously an unrecognized choice left model_url/model_name empty
        # and the docker exec commands below ran with garbage arguments.
        # Fall back to the smallest model instead.
        echo "无效的选择：$choice，将使用默认模型 DeepSeek-R1-1.5B。"
        model_url="modelscope.cn/unsloth/DeepSeek-R1-Distill-Qwen-1.5B-GGUF:DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
        model_name="DeepSeek-R1-1.5B"
        ;;
esac

echo "正在下载模型：$model_name"
# Pull under the upstream alias, rename to the short local name, then drop
# the alias. Abort if the pull fails so cp/rm don't run against a missing
# model; expansions are quoted defensively.
if ! docker exec -it open-webui ollama pull "$model_url"; then
    echo "错误：模型下载失败：$model_url"
    exit 1
fi
docker exec -it open-webui ollama cp "$model_url" "$model_name"
docker exec -it open-webui ollama rm "$model_url"

echo "Ollama和openWebUI已启动，访问http://服务器IP:${user_port}即可使用。"