#!/bin/bash
# One-click deployment of all model services.
# Models are expected under /data1/model on the host.
# Each NPU-backed model uses a dedicated NPU device (davinci0-3).

# -e: abort on the first failing command; -u: error on unset variables.
# pipefail is deliberately omitted: the find|head|grep probe below depends on
# default pipeline exit semantics (head closing the pipe early is harmless).
set -eu

# Host directory holding model weights, and where it is mounted in containers.
readonly MODEL_BASE_PATH="/data1/model"
readonly CONTAINER_BASE_PATH="/app/models"

echo "开始部署所有模型服务..."

# Base image availability: the image is public, so no SWR login is required.
echo "检查基础镜像可用性..."
echo "使用基础镜像: quay.io/ascend/vllm-ascend:v0.11.0rc0"
echo "该镜像公开可用，无需登录SWR"

# Guard clause: the host model directory must exist before anything is built.
if [[ ! -d $MODEL_BASE_PATH ]]; then
    echo "错误: 模型目录 $MODEL_BASE_PATH 不存在"
    exit 1
fi

# Probe the NPU driver tooling; a failure here is reported but not fatal.
echo "检查NPU设备..."
if ! npu-smi info; then
    echo "警告: 无法执行npu-smi，请确保已安装昇腾驱动"
fi

# Check the Ascend CANN environment on the host. These paths are later
# bind-mounted into the NPU containers; missing required ones typically
# manifest as libascend_hal.so load errors inside the container.
echo "检查Ascend CANN环境..."
ASCEND_DRIVER="/usr/local/Ascend/driver"
ASCEND_DRIVER_LIB="/usr/local/Ascend/driver/lib64"
ASCEND_DRIVER_VERSION="/usr/local/Ascend/driver/version.info"
ASCEND_TOOLKIT="/usr/local/Ascend/ascend-toolkit"
ASCEND_INSTALL_INFO="/etc/ascend_install.info"
DCMI_PATH="/usr/local/dcmi"
NPU_SMI="/usr/local/bin/npu-smi"

# Required paths found to be missing; reported in a summary at the end.
MISSING_PATHS=()

#######################################
# Check a path the containers require.
# Globals:   MISSING_PATHS (appended to on failure)
# Arguments: $1 - test flag (-d or -f); $2 - path;
#            $3 - message when missing; $4 - message when found
# Outputs:   one status line on stdout
# Returns:   0 when the path exists, 1 otherwise
#######################################
check_required() {
    if [ "$1" "$2" ]; then
        echo "$4"
        return 0
    fi
    echo "$3"
    MISSING_PATHS+=("$2")
    return 1
}

#######################################
# Check an optional path: same as check_required but never records the
# path in MISSING_PATHS and always returns 0.
#######################################
check_optional() {
    if [ "$1" "$2" ]; then
        echo "$4"
    else
        echo "$3"
    fi
}

if check_required -d "$ASCEND_DRIVER" "警告: 未找到 $ASCEND_DRIVER 目录" "✓ 找到驱动目录: $ASCEND_DRIVER"; then
    # Driver present: also look for the HAL runtime library the containers load.
    if [ -d "$ASCEND_DRIVER_LIB" ]; then
        if find "$ASCEND_DRIVER_LIB" -name "libascend_hal.so" 2>/dev/null | head -1 | grep -q .; then
            echo "✓ 找到 libascend_hal.so"
        else
            echo "警告: 在 $ASCEND_DRIVER_LIB 中未找到 libascend_hal.so"
        fi
    fi
fi

check_optional -d "$ASCEND_TOOLKIT" "提示: 未找到 $ASCEND_TOOLKIT 目录（可选，但推荐挂载）" "✓ 找到CANN工具包目录: $ASCEND_TOOLKIT"
# '|| true' keeps a missing path from aborting the script under set -e;
# the failure is already recorded in MISSING_PATHS.
check_required -f "$ASCEND_DRIVER_VERSION" "警告: 未找到 $ASCEND_DRIVER_VERSION 文件" "✓ 找到驱动版本信息: $ASCEND_DRIVER_VERSION" || true
check_required -f "$ASCEND_INSTALL_INFO" "警告: 未找到 $ASCEND_INSTALL_INFO 文件" "✓ 找到安装信息: $ASCEND_INSTALL_INFO" || true
check_optional -d "$DCMI_PATH" "提示: 未找到 $DCMI_PATH 目录（可选）" "✓ 找到DCMI路径: $DCMI_PATH"
check_optional -f "$NPU_SMI" "提示: 未找到 $NPU_SMI 工具（可选）" "✓ 找到 npu-smi 工具: $NPU_SMI"

# Summarize missing required paths so the operator sees them in one place.
if [ ${#MISSING_PATHS[@]} -gt 0 ]; then
    echo ""
    echo "警告: 以下路径未找到，容器可能无法正常访问NPU运行时库："
    for path in "${MISSING_PATHS[@]}"; do
        echo "  - $path"
    done
    echo "      这可能导致 libascend_hal.so 错误"
    echo ""
fi
# Deploy Gemma-3-270M with PyTorch + torch.compile, CPU inference only.
# Note: Gemma-3 currently has no TensorFlow implementation; torch.compile is
# used as a LiteRT-like optimization. No NPU device is attached.
# Host port 18003 -> container port 8000.
echo "部署 Gemma-3-270M (PyTorch + torch.compile 优化，CPU 推理)..."
cd gemma-3-270m
docker build -t gemma-3-270m-pytorch:latest .
# Replace any previous container of the same name (ignore "not found").
docker stop gemma-3-270m-api 2>/dev/null || true
docker rm gemma-3-270m-api 2>/dev/null || true
# All expansions quoted so arguments stay single words (ShellCheck SC2086).
docker run -d \
  --name gemma-3-270m-api \
  --restart=unless-stopped \
  -p 18003:8000 \
  -v "${MODEL_BASE_PATH}/gemma-3-270m:${CONTAINER_BASE_PATH}/gemma-3-270m:ro" \
  -e "LOCAL_MODEL_PATH=${CONTAINER_BASE_PATH}/gemma-3-270m" \
  -e "MODEL_CACHE_DIR=${CONTAINER_BASE_PATH}" \
  -e LOG_LEVEL=INFO \
  gemma-3-270m-pytorch:latest
cd ..

# Deploy finbert on NPU davinci0 (original comment wrongly said gemma-3-270m).
# Host port 18001 -> container port 8000.
echo "部署 finbert (NPU: davinci0)..."
cd finbert
docker build -t finbert-ascend:latest .
# Replace any previous container of the same name (ignore "not found").
docker stop finbert-api 2>/dev/null || true
docker rm finbert-api 2>/dev/null || true

# Collect optional device nodes and host mounts in an array so each argument
# stays a single word. (The original used unquoted $(if ...) substitutions,
# which depend on word splitting and break on paths with spaces.)
finbert_args=()
if [ -c /dev/davinci_manager ]; then finbert_args+=(--device=/dev/davinci_manager); fi
if [ -c /dev/devmm_svm ]; then finbert_args+=(--device=/dev/devmm_svm); fi
if [ -c /dev/hisi_hdc ]; then finbert_args+=(--device=/dev/hisi_hdc); fi
if [ -d "$DCMI_PATH" ]; then finbert_args+=(-v "${DCMI_PATH}:${DCMI_PATH}:ro"); fi
if [ -f "$NPU_SMI" ]; then finbert_args+=(-v "${NPU_SMI}:${NPU_SMI}:ro"); fi
if [ -d "$ASCEND_DRIVER" ]; then finbert_args+=(-v "${ASCEND_DRIVER}:${ASCEND_DRIVER}:ro"); fi
if [ -d "$ASCEND_TOOLKIT" ]; then finbert_args+=(-v "${ASCEND_TOOLKIT}:${ASCEND_TOOLKIT}:ro"); fi
if [ -f "$ASCEND_INSTALL_INFO" ]; then finbert_args+=(-v "${ASCEND_INSTALL_INFO}:${ASCEND_INSTALL_INFO}:ro"); fi

# The ${arr[@]+...} guard expands to nothing when the array is empty and is
# safe under 'set -u' on any bash version.
docker run -d \
  --name finbert-api \
  --device=/dev/davinci0 \
  ${finbert_args[@]+"${finbert_args[@]}"} \
  --restart=unless-stopped \
  -p 18001:8000 \
  -v "${MODEL_BASE_PATH}:${CONTAINER_BASE_PATH}" \
  -e "LOCAL_MODEL_PATH=${CONTAINER_BASE_PATH}/finbert" \
  -e NPU_VISIBLE_DEVICES=0 \
  -e ASCEND_RT_VISIBLE_DEVICES=0 \
  -e LD_LIBRARY_PATH=/usr/local/Ascend/driver/lib64:/usr/local/Ascend/driver/lib64/stub:/usr/local/Ascend/add-ons \
  finbert-ascend:latest

cd ..
echo ""
echo "所有服务部署完成！"
echo ""
echo "NPU分配情况:"
echo "  Gemma-3-270M    -> PyTorch + torch.compile (CPU 推理)  -> 端口 18003"
# Host port fixed: finbert is published as -p 18001:8000, not 8002.
echo "  finbert    -> NPU 0 (davinci0)  -> 端口 18001"
echo ""
echo "服务状态:"
# grep exits non-zero when no container matches; '|| true' keeps set -e from
# aborting the summary (and the script's exit status) in that case.
docker ps | grep -E "gemma-3-270m-api|finbert-api" || true
echo ""
echo "API文档:"
# Doc URLs fixed to the actual published host ports (18003 / 18001).
echo "  Gemma-3-270M (PyTorch + torch.compile): http://localhost:18003/docs"
echo "  FinBERT: http://localhost:18001/docs"
echo ""
echo "查看NPU使用情况:"
echo "  npu-smi info"

