#!/bin/bash

# Ollama install and model-loading script.
#
# Reads host/port, model directory and model name from
# config/ollama.yaml, installs Ollama if missing, starts the server
# and pulls the configured model.

# Fail fast: abort on command errors, unset variables and pipeline failures.
set -euo pipefail

# Absolute path of the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Project root directory (two levels up from this script)
PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." && pwd )"
# Path to the YAML configuration file
CONFIG_FILE="$PROJECT_ROOT/video2text-backend/config/ollama.yaml"

echo "检查Ollama环境..."

# The config file is required; bail out early with a clear message on stderr.
if [ ! -f "$CONFIG_FILE" ]; then
    echo "错误: 未找到配置文件 $CONFIG_FILE" >&2
    exit 1
fi

# Read config values from the YAML file.
#
# The values are printed one per line by python and consumed positionally
# with `read`, which avoids eval'ing generated shell code: config values
# containing shell metacharacters can no longer inject commands.
if ! command -v python3 >/dev/null 2>&1; then
    echo "错误: 未找到python3, 无法解析配置文件 $CONFIG_FILE" >&2
    exit 1
fi
{
    read -r OLLAMA_HOST
    read -r OLLAMA_MODELS
    read -r MODEL_NAME
} < <(python3 - "$CONFIG_FILE" <<'PYEOF'
import sys
import yaml

with open(sys.argv[1]) as f:
    config = yaml.safe_load(f)
print(f"{config['host']}:{config['port']}")
print(config['models_dir'])
print(config['model']['name'])
PYEOF
)

# All three values are required for the steps below.
if [ -z "${OLLAMA_HOST:-}" ] || [ -z "${OLLAMA_MODELS:-}" ] || [ -z "${MODEL_NAME:-}" ]; then
    echo "错误: 配置文件缺少必需字段 (host/port/models_dir/model.name)" >&2
    exit 1
fi

# Install Ollama if it is not already on PATH.
if command -v ollama >/dev/null 2>&1; then
    echo "Ollama已安装"
else
    echo "开始安装Ollama..."
    # -f: fail on HTTP errors instead of piping an error page into sh
    # -sS: quiet, but still print errors; -L: follow redirects
    # (ollama.ai redirects, so plain `curl` fetched nothing useful)
    curl -fsSL https://ollama.ai/install.sh | sh

    # Verify the installer actually put an `ollama` binary on PATH.
    if ! command -v ollama >/dev/null 2>&1; then
        echo "错误: Ollama安装失败" >&2
        exit 1
    fi
    echo "Ollama安装成功"
fi

# Export the connection settings so the ollama processes launched
# below (and by child scripts) pick them up.
export OLLAMA_HOST OLLAMA_MODELS

# Make sure the model storage directory exists before the server starts.
mkdir -p "$OLLAMA_MODELS"

# Start the Ollama server unless one is already running.
#
# pgrep replaces the fragile `ps | grep | grep -v grep | wc -l` check,
# which required the count to be exactly 1 and therefore mis-reported
# "not running" (and spawned a duplicate) whenever two or more matching
# processes existed.
if pgrep -f "ollama serve" >/dev/null; then
    echo "Ollama服务已运行....."
else
    echo "启动Ollama服务..."
    # Redirect output to a log so nohup does not drop nohup.out in the cwd.
    nohup ollama serve >"$OLLAMA_MODELS/ollama.log" 2>&1 &

    # Poll for up to 15s instead of one fixed 15s sleep.
    for _ in {1..15}; do
        sleep 1
        pgrep -f "ollama serve" >/dev/null && break
    done

    # Final verdict on the startup attempt.
    if pgrep -f "ollama serve" >/dev/null; then
        echo "Ollama服务启动成功"
    else
        echo "错误: Ollama服务启动失败" >&2
        exit 1
    fi
fi

# Pull the model unless it is already present locally.
#
# -F matches the model name as a literal string — names like
# "llama3.1" contain regex metacharacters ('.') that plain grep would
# treat as wildcards; -- protects against names starting with a dash.
if ollama list | grep -qF -- "$MODEL_NAME"; then
    echo "$MODEL_NAME模型已存在"
else
    echo "下载${MODEL_NAME}模型..."
    ollama pull "$MODEL_NAME"

    # Confirm the model actually landed in the local model list.
    if ! ollama list | grep -qF -- "$MODEL_NAME"; then
        echo "错误: 模型下载失败" >&2
        exit 1
    fi
    echo "模型下载成功"
fi

echo "Ollama环境检查完成!"

# Final summary: version, endpoint, model directory and installed models.
printf '%s\n' "-------------------" "Ollama版本:"
ollama --version
printf '%s\n' "Ollama服务地址: $OLLAMA_HOST" "模型目录: $OLLAMA_MODELS" "已安装的模型:"
ollama list
printf '%s\n' "-------------------"