# vLLM service management Makefile

# The recipes below use bash-only constructs (`source`, conda shell hooks),
# so pin the recipe shell explicitly instead of relying on /bin/sh being bash.
SHELL := /bin/bash

# Default configuration — every knob can be overridden on the command line,
# e.g. `make PORT=8000 start`.
MODEL_PATH := Qwen/Qwen2.5-VL-72B-Instruct-AWQ
SERVED_MODEL_NAME := Qwen2.5-VL-72B-Instruct-AWQ
PORT := 8197
TENSOR_PARALLEL_SIZE := 8
MAX_MODEL_LEN := 8192
MAX_NUM_SEQS := 2
GPU_MEMORY_UTILIZATION := 0.75
SWAP_SPACE := 16
# Optional rope-scaling JSON; leave ROPE_SCALING unset to omit the flag.
# Example: ROPE_SCALING := {"rope_type":"linear","factor":2.0}
HOST := 0.0.0.0
CUDA_DEVICES := 0,1,2,3,4,5,6,7
LOG_FILE := vllm_service.log
API_KEY := dummy-key

# NOTE(review): a hard-coded API key used to be committed (commented out) at
# this spot — treat it as leaked: rotate it, and pass real keys via the
# environment or command line (`make API_KEY=... start`), never in this file.
# Whether to automatically clean GPU memory: true = clean, false = skip
CLEAN_GPU := true

# Main start target — runs the launch sequence directly (no helper sh script).
# The whole launch is a single backslash-continued shell invocation so that
# the conda activation and the CUDA_VISIBLE_DEVICES export stay in effect for
# the `vllm serve` call at the end.
# Fixes vs previous version:
#  - `set -o pipefail` so a vllm failure is not masked by the trailing
#    `| tee` (otherwise the pipeline status is tee's and the `|| { ... }`
#    error handler never fires); guarded so shells without pipefail still run.
#  - conda activation grouped explicitly: `a || b && c` binds left-to-right
#    in sh, so the fallback re-activation previously ran even when the first
#    `conda activate` succeeded.
.PHONY: start
start:
	@echo "启动vLLM服务..."
	@echo "" >> $(LOG_FILE)
	@echo "====================================" >> $(LOG_FILE)
	@echo "$$(date): 启动 vLLM 服务脚本" >> $(LOG_FILE)
	@echo "====================================" >> $(LOG_FILE)
	@echo "$$(date): 尝试初始化conda环境..." | tee -a $(LOG_FILE)
	@{ set -o pipefail; } 2>/dev/null; \
	export CUDA_VISIBLE_DEVICES=$(CUDA_DEVICES) && \
	eval "$$(conda shell.bash hook)" && \
	{ conda activate myvllm || { source $$(conda info --base)/etc/profile.d/conda.sh && conda activate myvllm; }; } && \
	if [ "$$(conda info --envs | grep '*' | awk '{print $$1}')" != "myvllm" ]; then \
		echo "$$(date): 警告: conda环境激活失败，尝试使用其他方式..." | tee -a $(LOG_FILE); \
		source $$(conda info --base)/etc/profile.d/conda.sh; \
		conda activate myvllm; \
	fi && \
	if [ "$$(conda info --envs | grep '*' | awk '{print $$1}')" != "myvllm" ]; then \
		echo "$$(date): 错误: 无法激活conda环境myvllm，请检查环境配置" | tee -a $(LOG_FILE); \
		exit 1; \
	fi && \
	echo "$$(date): 当前环境: $$(conda info --envs | grep '*')" | tee -a $(LOG_FILE) && \
	echo "$$(date): Python路径: $$(which python)" | tee -a $(LOG_FILE) && \
	echo "$$(date): vLLM路径: $$(which vllm 2>/dev/null || echo 'vllm命令未找到')" | tee -a $(LOG_FILE) && \
	python -c "import torch; print('CUDA可用:', torch.cuda.is_available()); print('GPU数量:', torch.cuda.device_count())" 2>&1 | tee -a $(LOG_FILE) && \
	echo "" && \
	if [ "$(CLEAN_GPU)" = "true" ]; then \
		echo "$$(date): 清理GPU显存..." | tee -a $(LOG_FILE); \
		for device in $$(echo $(CUDA_DEVICES) | tr ',' ' '); do \
			echo "清理GPU $$device 显存"; \
			CUDA_VISIBLE_DEVICES=$$device python -c "import torch; torch.cuda.empty_cache()" 2>/dev/null || echo "清理GPU $$device 显存失败"; \
		done; \
		nvidia-smi >> $(LOG_FILE); \
	else \
		echo "$$(date): 已设置为不清理GPU显存" | tee -a $(LOG_FILE); \
	fi && \
	echo "$$(date): 启动vLLM服务..." | tee -a $(LOG_FILE) && \
	vllm serve $(MODEL_PATH) \
	  --trust-remote-code \
	  --tensor-parallel-size $(TENSOR_PARALLEL_SIZE) \
	  --max-model-len $(MAX_MODEL_LEN) \
	  --max-num-seqs $(MAX_NUM_SEQS) \
	  --gpu-memory-utilization $(GPU_MEMORY_UTILIZATION) \
	  --swap-space $(SWAP_SPACE) \
	  $(if $(ROPE_SCALING),--rope-scaling '$(ROPE_SCALING)',) \
	  --enforce-eager \
	  --dtype bfloat16 \
	  --served-model-name $(SERVED_MODEL_NAME) \
	  --api-key $(API_KEY) \
	  --port $(PORT) \
	  --host $(HOST) 2>&1 | tee -a $(LOG_FILE) || \
	{ EXIT_CODE=$$?; \
	  echo "$$(date): vLLM服务退出，退出码: $$EXIT_CODE" | tee -a $(LOG_FILE); \
	  if [ "$(CLEAN_GPU)" = "true" ]; then \
	    echo "$$(date): 清理GPU显存..." | tee -a $(LOG_FILE); \
	    for device in $$(echo $(CUDA_DEVICES) | tr ',' ' '); do \
	      echo "清理GPU $$device 显存" | tee -a $(LOG_FILE); \
	      CUDA_VISIBLE_DEVICES=$$device python -c "import torch; torch.cuda.empty_cache()" 2>/dev/null || echo "清理GPU $$device 显存失败" | tee -a $(LOG_FILE); \
	    done; \
	    echo "$$(date): 显存清理完成" | tee -a $(LOG_FILE); \
	    nvidia-smi >> $(LOG_FILE); \
	  fi; \
	  exit $$EXIT_CODE; \
	}

# Run the service in the background via nohup.
# Fix: `$$!` must be captured on the SAME recipe line that launches the
# background job — each recipe line runs in its own shell, so capturing it on
# a separate line (as before) always wrote an empty/stale vllm.pid.
# NOTE(review): the recorded PID is the background `make start` wrapper, not
# the vllm server itself; `stop` compensates by also scanning the process table.
.PHONY: daemon
daemon:
	@echo "在后台启动vLLM服务..."
	@nohup $(MAKE) -s start > vllm_daemon.log 2>&1 & echo $$! > vllm.pid
	@echo "服务已在后台启动，PID: `cat vllm.pid`"

# Stop the service and optionally clean GPU memory.
# Fix: the PID file records the background `make` wrapper (see `daemon`), so
# killing only that PID could leave the real `vllm serve` child running. After
# handling the PID file we now also look up the actual vllm process and
# terminate it.
.PHONY: stop
stop:
	@if [ -f vllm.pid ]; then \
		echo "停止vLLM服务 (PID: `cat vllm.pid`)..."; \
		kill `cat vllm.pid` 2>/dev/null || echo "进程已结束"; \
		rm -f vllm.pid; \
		PID=`ps aux | grep "vllm serve $(MODEL_PATH)" | grep -v grep | awk '{print $$2}'`; \
		if [ -n "$$PID" ]; then \
			echo "找到vLLM进程 PID: $$PID"; \
			kill $$PID 2>/dev/null; \
			echo "已发送终止信号"; \
		fi; \
		if [ "$(CLEAN_GPU)" = "true" ]; then \
			$(MAKE) cleanup_gpu; \
		else \
			echo "已设置为不清理GPU显存"; \
		fi; \
	else \
		echo "找不到vLLM服务PID文件"; \
		PID=`ps aux | grep "vllm serve $(MODEL_PATH)" | grep -v grep | awk '{print $$2}'`; \
		if [ -n "$$PID" ]; then \
			echo "找到vLLM进程 PID: $$PID"; \
			kill $$PID; \
			echo "已发送终止信号"; \
			if [ "$(CLEAN_GPU)" = "true" ]; then \
				$(MAKE) cleanup_gpu; \
			else \
				echo "已设置为不清理GPU显存"; \
			fi; \
		else \
			echo "未找到运行中的vLLM服务"; \
		fi; \
	fi

# Report whether the vLLM service is running.
# Checks the PID file first; falls back to scanning the process table for a
# `vllm serve` command line matching the configured model.
.PHONY: status
status:
	@if [ -f vllm.pid ] && ps -p $$(cat vllm.pid) > /dev/null; then \
		echo "vLLM服务正在运行 (PID: $$(cat vllm.pid))"; \
	else \
		FOUND=$$(ps aux | grep "vllm serve $(MODEL_PATH)" | grep -v grep | awk '{print $$2}'); \
		if [ -z "$$FOUND" ]; then \
			echo "vLLM服务未运行"; \
		else \
			echo "vLLM服务正在运行 (PID: $$FOUND)"; \
		fi; \
	fi

# Follow the service log continuously (Ctrl-C to stop).
.PHONY: logs
logs:
	@tail -f $(LOG_FILE)

# Show the last 50 lines of the service log.
.PHONY: log
log:
	@tail -n 50 $(LOG_FILE)

# Restart the service: stop it, wait 5s so processes can exit and release GPU
# memory, then relaunch in the background.
.PHONY: restart
restart: stop
	@sleep 5
	@$(MAKE) daemon

# Clean GPU memory caches, then warn about any still-running vLLM processes.
# Fix: the device list is now expanded at recipe run time
# (`$$(echo ... | tr ...)`) instead of via make's `$(shell ...)`, matching the
# identical loops in the `start`/`stop` recipes and avoiding a parse-time fork.
# NOTE(review): `torch.cuda.empty_cache()` in a *fresh* python process can only
# release that process's own cache — it cannot reclaim memory held by other
# live processes; `make kill_all` is the real remedy, as the message below says.
.PHONY: cleanup_gpu
cleanup_gpu:
	@echo "正在清理GPU显存..."
	@echo "注意：此命令只能清理未被进程使用的显存。要完全释放所有显存，请使用 make kill_all"
	@for device in $$(echo $(CUDA_DEVICES) | tr ',' ' '); do \
		echo "清理GPU $$device 显存"; \
		CUDA_VISIBLE_DEVICES=$$device python -c "import torch; torch.cuda.empty_cache();" 2>/dev/null || echo "清理GPU $$device 显存失败"; \
	done
	@echo "尝试释放可能的孤立GPU内存..."
	@PS_OUTPUT=$$(ps aux | grep -E "vllm|python.*$(MODEL_PATH)" | grep -v "grep"); \
	if [ -n "$$PS_OUTPUT" ]; then \
		echo "警告：以下vLLM相关进程仍在运行，占用的GPU内存无法被释放："; \
		echo "$$PS_OUTPUT"; \
		echo "要完全释放GPU内存，请先运行：make kill_all"; \
	fi
	@echo "显存清理完成（仅清理了未使用的缓存）"
	@nvidia-smi

# Force-kill (SIGKILL) every vLLM-related process and optionally clean GPU memory.
# NOTE(review): the pattern "vllm|python.*$(MODEL_PATH)" is broad — it may
# match unrelated processes whose command line merely contains "vllm"; verify
# before running on shared machines.
.PHONY: kill_all
kill_all:
	@echo "正在杀死所有vLLM相关进程..."
	@ps aux | grep -E "vllm|python.*$(MODEL_PATH)" | grep -v grep | awk '{print $$2}' | xargs -r kill -9
	@rm -f vllm.pid
	@if [ "$(CLEAN_GPU)" = "true" ]; then \
		$(MAKE) cleanup_gpu; \
	else \
		echo "已设置为不清理GPU显存"; \
	fi
	@echo "所有vLLM相关进程已终止"

# Fully clean GPU memory: kill every vLLM-related process, wait for them to
# die, then run a per-GPU cache clean and show the resulting GPU state.
# Fixes vs previous version:
#  - removed the tab-indented commented-out recipe lines ("步骤4/5"): make
#    still echoed them on every run (the `@` was inside the shell comment),
#    polluting the output with dead code.
#  - device list expanded at run time, consistent with the other recipes.
.PHONY: full_cleanup_gpu
full_cleanup_gpu:
	@echo "正在尝试完全清理GPU显存..."
	@echo "步骤1: 停止所有vLLM相关进程"
	@ps aux | grep -E "vllm|python.*$(MODEL_PATH)" | grep -v grep | awk '{print $$2}' | xargs -r kill -9
	@rm -f vllm.pid
	@echo "步骤2: 等待1秒以确保进程完全终止"
	@sleep 1
	@echo "步骤3: 为每个GPU运行显存清理"
	@for device in $$(echo $(CUDA_DEVICES) | tr ',' ' '); do \
		echo "清理GPU $$device 显存"; \
		CUDA_VISIBLE_DEVICES=$$device python -c "import torch; torch.cuda.empty_cache();" 2>/dev/null || echo "清理GPU $$device 显存失败"; \
	done
	@echo "步骤: 检查GPU状态"
	@nvidia-smi
	@echo "注意: 如果仍有大量显存被占用，可能有其他程序在使用GPU"
	@echo "      可以使用 'nvidia-smi' 查看占用进程的PID，然后手动终止"
	@echo "      或使用 'make kill_gpu_process PID=进程号' 终止特定进程"
	@echo "GPU显存清理操作完成"

# Kill the GPU process given by PID=<pid>: send SIGTERM first, escalate to
# SIGKILL if the process is still alive one second later, then run a per-GPU
# cache clean and show nvidia-smi.
# Usage: make kill_gpu_process PID=<pid>
# NOTE(review): `ps -p ... -o comm= | grep -q ""` is an existence check —
# grep succeeds iff ps printed any line for that PID.
.PHONY: kill_gpu_process
kill_gpu_process:
	@if [ -z "$(PID)" ]; then \
		echo "错误: 未指定PID"; \
		echo "使用方法: make kill_gpu_process PID=<进程ID>"; \
		echo "当前GPU进程:"; \
		nvidia-smi | grep -E "P[0-9]+"; \
		exit 1; \
	fi; \
	echo "尝试终止进程 $(PID)..."; \
	ps -p $(PID) -o comm= 2>/dev/null | grep -q ""; \
	if [ $$? -eq 0 ]; then \
		echo "找到进程 $(PID)"; \
		kill -15 $(PID) 2>/dev/null; \
		sleep 1; \
		ps -p $(PID) -o comm= 2>/dev/null | grep -q ""; \
		if [ $$? -eq 0 ]; then \
			echo "进程未终止，尝试强制终止..."; \
			kill -9 $(PID) 2>/dev/null; \
			echo "已发送强制终止信号"; \
		else \
			echo "进程已终止"; \
		fi; \
	else \
		echo "未找到进程 $(PID)"; \
	fi; \
	echo "清理GPU显存..."; \
	for device in $(shell echo $(CUDA_DEVICES) | tr ',' ' '); do \
		CUDA_VISIBLE_DEVICES=$$device python -c "import torch; torch.cuda.empty_cache();" 2>/dev/null; \
	done; \
	echo "当前GPU状态:"; \
	nvidia-smi

# ----- Container monitoring -----

# List vllm-related docker containers and print each one's name and image.
# Tries a plain `docker ps | grep` first, then a formatted listing as a
# fallback (catches containers where "vllm" only appears in image/command).
.PHONY: list_containers
list_containers:
	@echo "正在获取vllm相关容器..."
	@CONTAINER_LIST=$$(docker ps | grep -i "vllm"); \
	if [ -z "$$CONTAINER_LIST" ]; then \
		echo "未找到vllm相关容器，尝试搜索所有容器信息..."; \
		CONTAINER_LIST=$$(docker ps --format "{{.ID}}\t{{.Image}}\t{{.Names}}\t{{.Command}}" | grep -i "vllm"); \
		if [ -z "$$CONTAINER_LIST" ]; then \
			echo "未找到vllm相关容器，请确认容器是否运行中。"; \
			echo "列出所有容器以供参考："; \
			docker ps; \
			exit 1; \
		fi; \
	fi; \
	echo "找到以下vllm相关容器："; \
	echo "$$CONTAINER_LIST"; \
	CONTAINER_IDS=$$(echo "$$CONTAINER_LIST" | awk '{print $$1}'); \
	echo "容器ID列表: $$CONTAINER_IDS"; \
	for CONTAINER_ID in $$CONTAINER_IDS; do \
		CONTAINER_NAME=$$(docker inspect --format '{{.Name}}' $$CONTAINER_ID 2>/dev/null | sed 's/\///'); \
		CONTAINER_IMAGE=$$(docker inspect --format '{{.Config.Image}}' $$CONTAINER_ID 2>/dev/null); \
		echo "容器名称: $$CONTAINER_NAME (ID: $$CONTAINER_ID)"; \
		echo "镜像: $$CONTAINER_IMAGE"; \
	done

# Monitor one container (CONTAINER_ID=<id>): prints its name/image, host-side
# main PID, in-container process list, port mappings (with host lsof lookups),
# network sockets, and a best-effort GPU usage lookup via nvidia-smi.
# Usage: make monitor_container CONTAINER_ID=<id>
# NOTE(review): the ss and nvidia-smi greps match the container's main PID as
# a plain substring — they can produce false positives/negatives; treat the
# GPU section as best-effort only.
.PHONY: monitor_container
monitor_container:
	@if [ -z "$(CONTAINER_ID)" ]; then \
		echo "错误: 未指定容器ID"; \
		echo "使用方法: make monitor_container CONTAINER_ID=<容器ID>"; \
		exit 1; \
	fi; \
	echo "监控容器 $(CONTAINER_ID)..."; \
	CONTAINER_NAME=$$(docker inspect --format '{{.Name}}' $(CONTAINER_ID) 2>/dev/null | sed 's/\///'); \
	CONTAINER_IMAGE=$$(docker inspect --format '{{.Config.Image}}' $(CONTAINER_ID) 2>/dev/null); \
	echo "容器名称: $$CONTAINER_NAME (ID: $(CONTAINER_ID))"; \
	echo "镜像: $$CONTAINER_IMAGE"; \
	CONTAINER_PID=$$(docker inspect --format '{{.State.Pid}}' $(CONTAINER_ID) 2>/dev/null); \
	if [ -z "$$CONTAINER_PID" ] || [ "$$CONTAINER_PID" = "0" ]; then \
		echo "无法获取容器PID信息"; \
		exit 1; \
	fi; \
	echo "容器主进程PID: $$CONTAINER_PID"; \
	echo "容器内进程信息:"; \
	docker exec $(CONTAINER_ID) ps aux 2>/dev/null || echo "无法执行ps命令，可能容器中未安装ps工具"; \
	echo "容器端口映射信息:"; \
	PORTS=$$(docker port $(CONTAINER_ID) 2>/dev/null); \
	if [ -z "$$PORTS" ]; then \
		echo "该容器未配置端口映射，尝试从容器配置中查找端口..."; \
		EXPOSED_PORTS=$$(docker inspect --format='{{range $$p, $$conf := .Config.ExposedPorts}}{{$$p}} {{end}}' $(CONTAINER_ID) 2>/dev/null); \
		if [ -n "$$EXPOSED_PORTS" ]; then \
			echo "容器暴露的端口: $$EXPOSED_PORTS"; \
		else \
			echo "未找到暴露的端口配置"; \
		fi; \
	else \
		echo "$$PORTS"; \
		HOST_PORTS=$$(echo "$$PORTS" | awk -F':' '{print $$NF}'); \
		echo "主机上端口使用情况 (lsof):"; \
		for PORT in $$HOST_PORTS; do \
			echo "端口 $$PORT 的lsof信息:"; \
			lsof -i:$$PORT -P -n 2>/dev/null || echo "无法获取端口 $$PORT 的lsof信息，可能需要更高权限"; \
		done; \
	fi; \
	echo "与PID $$CONTAINER_PID 相关的网络连接:"; \
	ss -tuln -p 2>/dev/null | grep "$$CONTAINER_PID" 2>/dev/null || \
	echo "无法获取与PID $$CONTAINER_PID 相关的网络连接信息"; \
	echo "获取容器使用的GPU信息:"; \
	nvidia-smi -q -i 0 -d PIDS 2>/dev/null | grep -A8 "Processes" | grep -B2 "$$CONTAINER_PID" || \
	nvidia-smi | grep "$$CONTAINER_PID" || \
	echo "无法获取容器 $(CONTAINER_ID) 使用的GPU信息"

# Run monitor_container against every vllm-related container.
# Looks containers up with a plain `docker ps | grep` first and falls back to
# a formatted listing; errors out if neither finds anything.
.PHONY: monitor
monitor:
	@echo "监控所有vLLM相关容器..."
	@CONTAINER_LIST=$$(docker ps | grep -i "vllm"); \
	[ -n "$$CONTAINER_LIST" ] || CONTAINER_LIST=$$(docker ps --format "{{.ID}}\t{{.Image}}\t{{.Names}}\t{{.Command}}" | grep -i "vllm"); \
	if [ -z "$$CONTAINER_LIST" ]; then \
		echo "未找到vllm相关容器"; \
		exit 1; \
	fi; \
	for ID in $$(echo "$$CONTAINER_LIST" | awk '{print $$1}'); do \
		$(MAKE) monitor_container CONTAINER_ID=$$ID; \
	done

# One-shot snapshot of docker container resource usage and GPU utilization.
.PHONY: resource
resource:
	@echo "==== Docker容器资源使用情况 ===="
	@docker stats --no-stream
	@echo "==== GPU资源使用情况 ===="
	@nvidia-smi

# Print usage help: available targets, current configuration values, and
# example invocations. Keep this in sync when adding/removing targets.
.PHONY: help
help:
	@echo "vLLM服务管理Makefile"
	@echo "使用方法:"
	@echo ""
	@echo "服务管理:"
	@echo "  make start           - 启动vLLM服务"
	@echo "  make daemon          - 在后台启动vLLM服务"
	@echo "  make stop            - 停止vLLM服务并清理显存"
	@echo "  make restart         - 重启vLLM服务"
	@echo "  make status          - 查看服务状态"
	@echo ""
	@echo "日志管理:"
	@echo "  make logs            - 持续查看日志"
	@echo "  make log             - 查看最近50行日志"
	@echo ""
	@echo "资源管理:"
	@echo "  make cleanup_gpu     - 清理未使用的GPU显存缓存（不会清理被进程占用的显存）"
	@echo "  make full_cleanup_gpu- 完全清理GPU显存（终止进程并清理，普通用户权限可用）"
	@echo "  make kill_all        - 强制杀死所有vLLM相关进程并清理GPU"
	@echo "  make kill_gpu_process PID=<进程ID> - 终止指定PID的GPU进程并清理显存"
	@echo "  make resource        - 查看容器和GPU资源使用情况"
	@echo ""
	@echo "容器监控:"
	@echo "  make list_containers - 列出所有vllm相关容器"
	@echo "  make monitor         - 监控所有vllm相关容器"
	@echo "  make monitor_container CONTAINER_ID=<容器ID> - 监控指定容器"
	@echo ""
	@echo "配置管理:"  
	@echo "  make help            - 显示此帮助信息"
	@echo ""
	@echo "当前配置:"
	@echo "  MODEL_PATH=$(MODEL_PATH)"
	@echo "  TENSOR_PARALLEL_SIZE=$(TENSOR_PARALLEL_SIZE)"
	@echo "  PORT=$(PORT)"
	@echo "  GPU_MEMORY_UTILIZATION=$(GPU_MEMORY_UTILIZATION)"
	@echo "  CUDA_DEVICES=$(CUDA_DEVICES)"
	@echo "  CLEAN_GPU=$(CLEAN_GPU) (设置为false可禁用自动清理GPU显存)"
	@echo ""
	@echo "使用示例:"
	@echo "  make CLEAN_GPU=false start        - 启动服务但不清理GPU显存"
	@echo "  make CLEAN_GPU=true cleanup_gpu   - 手动清理GPU显存"
	@echo "  make full_cleanup_gpu             - 完全清理所有GPU显存（普通用户可用）"
	@echo "  make kill_gpu_process PID=12345   - 终止PID为12345的进程并清理GPU" 