#!/bin/bash
set -euo pipefail
# Resolve the directory containing this script and chdir into it, so every
# relative path below behaves the same regardless of the caller's cwd.
BaseDir=$(cd "$(dirname "$0")"; pwd)
# Quote the expansion: an unquoted ${BaseDir} would word-split/glob if the
# script path ever contains spaces or metacharacters (SC2086).
cd "${BaseDir}"

# Dispatch on the first CLI argument; ${1-} keeps 'set -u' happy when no
# argument is given, falling through to the usage arm.
case "${1-}" in
init)
    # Install Docker CE only when the docker CLI is not already functional.
    docker -v || {
        echo "install docker..."
        apt update

        apt install apt-transport-https ca-certificates curl software-properties-common -y
        # NOTE(review): apt-key is deprecated on current Ubuntu releases;
        # consider the signed-by keyring approach. '|| true' deliberately
        # keeps a key-add failure non-fatal under 'set -e'.
        curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - || true
        add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
        apt update

        apt install docker-ce -y

        docker -v
        # Daemon config: Aliyun registry mirror, relocated data root, and no
        # iptables/ip6tables management by dockerd.
        tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://2mt5bmc8.mirror.aliyuncs.com"],
  "data-root": "/mnt/workspace/vols",
  "iptables": false,
  "ip6tables": false
}
EOF
        # dockerd --storage-driver vfs --bridge=none

        # https://cr.console.aliyun.com/repository/cn-beijing/wushifeng/thingstrue/images
        # docker pull registry.cn-beijing.aliyuncs.com/wushifeng/thingstrue:alpine_3.18
        # docker run -it --rm registry.cn-beijing.aliyuncs.com/wushifeng/thingstrue:alpine_3.18
    }
;;
#
udocker)
    # udocker must be run by an ordinary (non-root) user; this arm provisions
    # that user on first invocation.
    # grep -q on the anchored login name replaces the noisy 'cat|grep' pipe:
    # it tests presence silently and cannot match stray substrings elsewhere.
    grep -q '^udocker:' /etc/passwd || {
        echo "add udocker user"
        useradd udocker
        # passwd udocker
        echo "udocker:udocker" | chpasswd
        mkdir -p /home/udocker
        # One chown with user:group instead of separate chown + chgrp passes.
        chown -R udocker:udocker /home/udocker
        # pip config -v list
        pip install udocker

        cat <<EOF
su udocker 切到这个普通用户，进行操作

EOF
    }
    # udocker pull registry.cn-beijing.aliyuncs.com/wushifeng/thingstrue:alpine_3.18
    # udocker run --name=test registry.cn-beijing.aliyuncs.com/wushifeng/thingstrue:alpine_3.18 bash

    # udocker run test bash
;;

#
ollama)
    # https://github.com/ollama/ollama  ollama run llama3.2:1b
    # Available env vars: https://github.com/ollama/ollama/blob/main/envconfig/config.go
    # The no-op heredoc below is a comment block; the delimiter is quoted so
    # bash never attempts $-expansion inside the reference text.
:<<'EOF'
		"OLLAMA_DEBUG":             {"OLLAMA_DEBUG", Debug(), "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
		"OLLAMA_FLASH_ATTENTION":   {"OLLAMA_FLASH_ATTENTION", FlashAttention(), "Enabled flash attention"},
		"OLLAMA_KV_CACHE_TYPE":     {"OLLAMA_KV_CACHE_TYPE", KvCacheType(), "Quantization type for the K/V cache (default: f16)"},
		"OLLAMA_GPU_OVERHEAD":      {"OLLAMA_GPU_OVERHEAD", GpuOverhead(), "Reserve a portion of VRAM per GPU (bytes)"},
		"OLLAMA_HOST":              {"OLLAMA_HOST", Host(), "IP Address for the ollama server (default 127.0.0.1:11434)"},
		"OLLAMA_KEEP_ALIVE":        {"OLLAMA_KEEP_ALIVE", KeepAlive(), "The duration that models stay loaded in memory (default \"5m\")"},
		"OLLAMA_LLM_LIBRARY":       {"OLLAMA_LLM_LIBRARY", LLMLibrary(), "Set LLM library to bypass autodetection"},
		"OLLAMA_LOAD_TIMEOUT":      {"OLLAMA_LOAD_TIMEOUT", LoadTimeout(), "How long to allow model loads to stall before giving up (default \"5m\")"},
		"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners(), "Maximum number of loaded models per GPU"},
		"OLLAMA_MAX_QUEUE":         {"OLLAMA_MAX_QUEUE", MaxQueue(), "Maximum number of queued requests"},
		"OLLAMA_MODELS":            {"OLLAMA_MODELS", Models(), "The path to the models directory"},
		"OLLAMA_NOHISTORY":         {"OLLAMA_NOHISTORY", NoHistory(), "Do not preserve readline history"},
		"OLLAMA_NOPRUNE":           {"OLLAMA_NOPRUNE", NoPrune(), "Do not prune model blobs on startup"},
		"OLLAMA_NUM_PARALLEL":      {"OLLAMA_NUM_PARALLEL", NumParallel(), "Maximum number of parallel requests"},
		"OLLAMA_ORIGINS":           {"OLLAMA_ORIGINS", Origins(), "A comma separated list of allowed origins"},
		"OLLAMA_SCHED_SPREAD":      {"OLLAMA_SCHED_SPREAD", SchedSpread(), "Always schedule model across all GPUs"},
		"OLLAMA_MULTIUSER_CACHE":   {"OLLAMA_MULTIUSER_CACHE", MultiUserCache(), "Optimize prompt caching for multi-user scenarios"},

		// Informational
		"HTTP_PROXY":  {"HTTP_PROXY", String("HTTP_PROXY")(), "HTTP proxy"},
		"HTTPS_PROXY": {"HTTPS_PROXY", String("HTTPS_PROXY")(), "HTTPS proxy"},
		"NO_PROXY":    {"NO_PROXY", String("NO_PROXY")(), "No proxy"},
EOF
    echo "启动ollama 服务"

    # Export once so both the background server and the foreground 'run'
    # resolve the same models directory (was duplicated on both lines).
    export OLLAMA_MODELS=/mnt/workspace/vols
    ollama serve &
    sleep 3   # give the server a moment to bind 127.0.0.1:11434 before 'run'

    echo "运行lamma3.2 10亿的模型"
    ollama run llama3.2:1b
;;

#
*)
 echo "ollama:  ./init.sh ollama"
;;
esac


