# Ollama大语言模型Linux部署与运维管理Makefile

# Default configuration (override on the command line, e.g. `make OLLAMA_PORT=11500 status`)
SERVER_NAME := ollama
OLLAMA_PORT := 11434
OLLAMA_HOST := 127.0.0.1
OLLAMA_API := http://$(OLLAMA_HOST):$(OLLAMA_PORT)
# Use $(HOME), not `~`: make performs no tilde expansion, so a `~` path only
# works when the recipe leaves it unquoted for the shell — quoting it breaks.
OLLAMA_BIN := $(HOME)/local/bin/ollama
OLLAMA_MODELS_DIR := $(HOME)/.ollama/models
DEFAULT_MODEL := gemma3:27b-it-fp16

# Install/deploy settings (not yet verified)
INSTALL_DIR := $(HOME)/local/bin
OLLAMA_RELEASE_URL := https://github.com/ollama/ollama/releases/latest/download/ollama-linux-amd64

# Show service PID, listening-port and bound-IP information.
.PHONY: status
status:
	@echo "===== $(SERVER_NAME) 进程和网络信息 ====="
	@echo "进程信息 (PID):"
	@PID=`ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print $$2}'`; \
	if [ -n "$$PID" ]; then \
		echo "查找到进程:"; \
		ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print "PID:" $$2, "用户:" $$1, "CPU:" $$3"%", "内存:" $$4"%", "启动时间:" $$9}'; \
	else \
		echo "未找到 $(SERVER_NAME) 相关进程"; \
	fi
	@# printf instead of `echo "\n..."`: bash's echo prints the backslash literally.
	@printf '\n端口信息 (%s):\n' "$(OLLAMA_PORT)"
	@# Anchor with ":PORT " so e.g. port 114340 is not matched as a substring.
	@if ss -tuln | grep -q ":$(OLLAMA_PORT) "; then \
		echo "端口 $(OLLAMA_PORT) 监听状态:"; \
		ss -tuln | grep ":$(OLLAMA_PORT) "; \
	else \
		echo "端口 $(OLLAMA_PORT) 未被监听"; \
	fi
	@printf '\nIP地址信息:\n'
	@# Capture first: `... | sort | uniq || echo` can never fall through, because
	@# the pipeline's exit status is sort/uniq's, not grep's.
	@IPS=$$(ss -tuln | grep ":$(OLLAMA_PORT) " | grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}' | sort -u); \
	if [ -n "$$IPS" ]; then echo "$$IPS"; else echo "未找到相关IP"; fi

#############################################
# 1. 服务生命周期管理
#############################################

# Install the Ollama binary into INSTALL_DIR (not yet verified).
# No sudo: INSTALL_DIR lives under $HOME, and sudo would leave root-owned files there.
.PHONY: install
install:
	@echo "正在安装 $(SERVER_NAME)..."
	@if [ -f $(OLLAMA_BIN) ]; then \
		echo "$(SERVER_NAME) 已安装在 $(OLLAMA_BIN)"; \
	else \
		echo "下载 $(SERVER_NAME) 二进制文件..."; \
		curl -fL $(OLLAMA_RELEASE_URL) -o /tmp/ollama && \
		echo "安装到 $(INSTALL_DIR)..." && \
		mkdir -p $(INSTALL_DIR) && \
		mv /tmp/ollama $(INSTALL_DIR)/ollama && \
		chmod +x $(INSTALL_DIR)/ollama && \
		echo "$(SERVER_NAME) 安装完成!"; \
	fi

# Start the server in the foreground (blocks the terminal until stopped).
.PHONY: start
start:
	@echo "以前台模式启动 $(SERVER_NAME) 服务..."
	@RUNNING=$$(ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print $$2}'); \
	if [ -z "$$RUNNING" ]; then \
		$(OLLAMA_BIN) serve; \
	else \
		echo "$(SERVER_NAME) 服务已经在运行，PID: $$RUNNING"; \
	fi

# Start the server detached; stdout/stderr go to ~/ollama.log.
.PHONY: start-background
start-background:
	@echo "以后台模式启动 $(SERVER_NAME) 服务..."
	@RUNNING=$$(ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print $$2}'); \
	if [ -z "$$RUNNING" ]; then \
		nohup $(OLLAMA_BIN) serve > $(HOME)/ollama.log 2>&1 & \
		echo "$(SERVER_NAME) 服务已在后台启动"; \
	else \
		echo "$(SERVER_NAME) 服务已经在运行，PID: $$RUNNING"; \
	fi

# Stop the service: send SIGTERM to every matching process.
.PHONY: stop
stop:
	@echo "停止 $(SERVER_NAME) 服务..."
	@PIDS=$$(ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print $$2}'); \
	if [ -z "$$PIDS" ]; then \
		echo "未发现运行中的 $(SERVER_NAME) 进程"; \
	else \
		echo "发现 $(SERVER_NAME) 进程 (PID: $$PIDS)，正在停止..."; \
		for p in $$PIDS; do \
			kill -15 $$p; \
			echo "已发送终止信号到进程 $$p"; \
		done; \
	fi

# Restart: stop, wait for shutdown, then relaunch in the background.
# Use $(MAKE), not literal `make`, so -n/-j and the jobserver propagate
# and the same make binary is reused in the sub-invocation.
.PHONY: restart
restart: stop
	@echo "等待服务完全停止..."
	@sleep 2
	@$(MAKE) start-background

#############################################
# 2. 模型资源管理
#############################################

# List installed models (requires the models directory to exist).
.PHONY: models
models:
	@echo "===== 已安装的模型列表 ====="
	@if [ ! -d $(OLLAMA_MODELS_DIR) ]; then \
		echo "模型目录不存在，请先安装并运行 $(SERVER_NAME)"; \
	else \
		$(OLLAMA_BIN) list; \
	fi

# Pull a model image; falls back to DEFAULT_MODEL when MODEL is not given.
.PHONY: pull
pull:
	@echo "拉取模型中..."
	@if [ -n "$(MODEL)" ]; then \
		echo "正在拉取模型 $(MODEL)..."; \
		$(OLLAMA_BIN) pull $(MODEL); \
	else \
		echo "正在拉取默认模型 $(DEFAULT_MODEL)..."; \
		$(OLLAMA_BIN) pull $(DEFAULT_MODEL); \
	fi

# Open an interactive chat session; falls back to DEFAULT_MODEL when MODEL is not given.
.PHONY: run
run:
	@echo "启动交互式会话..."
	@if [ -n "$(MODEL)" ]; then \
		echo "使用模型 $(MODEL)"; \
		$(OLLAMA_BIN) run $(MODEL); \
	else \
		echo "使用默认模型 $(DEFAULT_MODEL)"; \
		$(OLLAMA_BIN) run $(DEFAULT_MODEL); \
	fi

# Delete a model; MODEL is mandatory.
.PHONY: rm-model
rm-model:
	@if [ -z "$(MODEL)" ]; then \
		echo "请指定要删除的模型名称，例如: make rm-model MODEL=llama3"; \
		exit 1; \
	else \
		echo "删除模型 $(MODEL)..."; \
		$(OLLAMA_BIN) rm $(MODEL) && \
		echo "模型 $(MODEL) 已删除"; \
	fi

#############################################
# 3. GPU资源优化与监控
#############################################

# Report per-GPU utilisation via nvidia-smi, when the driver tools are installed.
.PHONY: gpu-status
gpu-status:
	@echo "===== GPU 资源使用情况 ====="
	@if ! command -v nvidia-smi > /dev/null; then \
		echo "未找到 nvidia-smi 命令，请确认NVIDIA驱动是否正确安装"; \
	else \
		nvidia-smi --query-gpu=index,name,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv,noheader; \
	fi

# Run a model with an explicit GPU layer count.
# NOTE(review): assumes the binary honours an OLLAMA_GPU_LAYERS environment
# variable — confirm against the Ollama documentation for the deployed version.
.PHONY: run-layers
run-layers:
	@if [ -z "$(MODEL)" ]; then \
		echo "请指定模型名称，例如: make run-layers MODEL=llama3 LAYERS=40"; \
		exit 1; \
	elif [ -z "$(LAYERS)" ]; then \
		echo "请指定GPU层数，例如: make run-layers MODEL=$(MODEL) LAYERS=40"; \
		exit 1; \
	fi
	@echo "使用自定义GPU层数启动模型 $(MODEL)，层数: $(LAYERS)"
	@OLLAMA_GPU_LAYERS=$(LAYERS) $(OLLAMA_BIN) run $(MODEL)

#############################################
# 4. 运维与监控
#############################################

# Show system-wide and per-process resource usage.
.PHONY: resources
resources:
	@echo "===== 系统资源使用情况 ====="
	@echo "CPU使用率:"
	@top -bn1 | grep "Cpu(s)" | awk '{print $$2 + $$4 "% used"}'
	@# printf instead of `echo "\n..."`: bash's echo prints the backslash literally.
	@printf '\n内存使用情况:\n'
	@free -h | grep "Mem:"
	@printf '\n磁盘使用情况:\n'
	@# BUG FIX: the regex must reach the shell as '/$|/home'. Written as '$|' it is
	@# make's order-only-prerequisites automatic variable, which expands to empty,
	@# so the root filesystem line was never matched. '$$' escapes both '$'s.
	@df -h | grep -E '/$$|/home'
	@printf '\n%s 进程资源使用:\n' "$(SERVER_NAME)"
	@PID=`ps aux | grep "$(SERVER_NAME)" | grep -v grep | awk '{print $$2}'`; \
	if [ -n "$$PID" ]; then \
		ps -p $$(echo $$PID | tr ' ' ',') -o pid,vsz=内存,pcpu=CPU,%mem=内存占用率,comm; \
	else \
		echo "$(SERVER_NAME) 进程未运行"; \
	fi

# Print the last 50 lines of the background-mode service log.
.PHONY: logs
logs:
	@echo "===== $(SERVER_NAME) 日志 ====="
	@LOG="$(HOME)/ollama.log"; \
	if [ -f "$$LOG" ]; then \
		tail -n 50 "$$LOG"; \
	else \
		echo "日志文件不存在"; \
	fi

# Follow the background-mode service log until interrupted (Ctrl-C).
.PHONY: logs-follow
logs-follow:
	@echo "===== 实时监控 $(SERVER_NAME) 日志 ====="
	@LOG="$(HOME)/ollama.log"; \
	if [ -f "$$LOG" ]; then \
		tail -f "$$LOG"; \
	else \
		echo "日志文件不存在"; \
	fi

#############################################
# 5. API服务管理
#############################################

# Probe the HTTP API. Per the Ollama API docs, GET /api/tags returns a
# single-line JSON object {"models":[...]} — the literal substring "tag" does
# not occur in the payload, so the old `grep -q tag` reported a healthy server
# as down, and `grep tag | wc -l` counted lines of one-line JSON, not models.
.PHONY: test-api
test-api:
	@echo "测试 $(SERVER_NAME) API 服务..."
	@RESP=$$(curl -s --max-time 5 $(OLLAMA_API)/api/tags 2>/dev/null); \
	if echo "$$RESP" | grep -q '"models"'; then \
		echo "API 服务正常响应"; \
		echo "$$RESP" | grep -o '"name"' | wc -l | awk '{print "已安装模型数量: " $$1}'; \
	else \
		echo "API 服务未响应，请检查服务是否启动"; \
	fi

# Send one chat message via the REST API. PROMPT is mandatory;
# MODEL defaults to DEFAULT_MODEL.
# BUG FIX: the old `MODEL="$(DEFAULT_MODEL)"` ran in its own recipe subshell
# and could never affect the make variable $(MODEL) used afterwards, so with
# no MODEL given the request was sent with "model": "". The default is now
# applied in the same shell that issues the request.
.PHONY: chat-api
chat-api:
	@if [ -z "$(PROMPT)" ]; then \
		echo "请提供对话内容，例如: make chat-api MODEL=llama3 PROMPT='你好，请介绍一下自己'"; \
		exit 1; \
	fi
	@MODEL="$(MODEL)"; MODEL="$${MODEL:-$(DEFAULT_MODEL)}"; \
	echo "向模型 $$MODEL 发送对话: $(PROMPT)"; \
	curl -s $(OLLAMA_API)/api/chat -d "{ \"model\": \"$$MODEL\", \"messages\": [{ \"role\": \"user\", \"content\": \"$(PROMPT)\" }] }" | grep -oP '(?<="content":")\K[^"]+'

# Print usage for every target. The echoed text is user-facing output —
# keep it in sync with the actual rules above when targets change.
.PHONY: help
help:
	@echo "===== Ollama大语言模型部署与运维管理 ====="
	@echo "使用方法:"
	@echo ""
	@echo "1. 服务生命周期管理:"
	@echo "  make install           - 安装Ollama服务"
	@echo "  make start             - 前台启动Ollama服务"
	@echo "  make start-background  - 后台启动Ollama服务"
	@echo "  make stop              - 停止Ollama服务"
	@echo "  make restart           - 重启Ollama服务"
	@echo "  make status            - 查询服务状态信息"
	@echo ""
	@echo "2. 模型资源管理:"
	@echo "  make models            - 列出已安装的模型"
	@echo "  make pull [MODEL=模型名]    - 拉取指定模型(默认: $(DEFAULT_MODEL))"
	@echo "  make run [MODEL=模型名]     - 运行交互式模型会话(默认: $(DEFAULT_MODEL))"
	@echo "  make rm-model MODEL=模型名  - 删除指定模型"
	@echo ""
	@echo "3. GPU资源优化与监控:"
	@echo "  make gpu-status                     - 查看GPU资源使用情况"
	@echo "  make run-layers MODEL=模型名 LAYERS=层数 - 自定义GPU层数启动模型"
	@echo ""
	@echo "4. 运维与监控:"
	@echo "  make resources         - 查看系统资源使用情况"
	@echo "  make logs              - 查看服务日志"
	@echo "  make logs-follow       - 实时监控服务日志"
	@echo ""
	@echo "5. API服务管理:"
	@echo "  make test-api                                  - 测试API服务是否正常响应"
	@echo "  make chat-api [MODEL=模型名] PROMPT='对话内容'   - 使用API发送聊天请求"
	@echo ""
	@echo "示例:"
	@echo "  make pull MODEL=llama3       - 拉取llama3模型"
	@echo "  make run MODEL=llama3        - 启动与llama3模型的交互式会话"
	@echo "  make chat-api PROMPT='你好'  - 向默认模型发送对话请求"