# Load the Ascend toolchain and the vLLM-Ascend environment.
# NOTE(review): set_env_vllm_ascend.sh presumably exports IPS, HCCL_IF_IP,
# NUM_NPU, MODEL_PATH and PORT used below — confirm against that script.
source /usr/local/Ascend/nnal/atb/set_env.sh
source /usr/local/Ascend/ascend-toolkit/set_env.sh
CONFIG_FILE="./vllm_config.yaml"
source ./set_env_vllm_ascend.sh

# The first address listed in IPS is treated as the Ray head node.
MASTER_IP=$(awk 'NR == 1 { print $1; exit }' <<<"$IPS")

# Log directory, overridable via LOGSDIR; mkdir -p is a no-op if it exists.
log_dir=${LOGSDIR:-"./qwen3_logs"}
mkdir -p "$log_dir"
# Start Ray: the node whose HCCL interface IP matches MASTER_IP becomes the
# head; every other node joins it on the default Ray port 6379.
# Capture stderr too (2>&1) — the failure message below points the user at
# these logs, so the diagnostics must actually land in them.
if [ "$HCCL_IF_IP" == "$MASTER_IP" ]; then
	ray start --head --num-gpus="$NUM_NPU" > "$log_dir/ray_start_master.log" 2>&1
else
	ray start --address="${MASTER_IP}:6379" --num-gpus="$NUM_NPU" --node-ip-address="$HCCL_IF_IP" > "$log_dir/ray_start_worker.log" 2>&1
fi

# Give the local raylet a moment to register before probing cluster state.
sleep 5
status_output=$(ray status)
# 'ray status' lists healthy nodes under an "Active:" header; if the grep
# finds nothing (ray down, or no such section) the substring check fails.
active_status=$(echo "$status_output" | grep -A 1 "Active:")

if [[ "$active_status" != *"Active"* ]]; then
    echo "${HCCL_IF_IP} Ray启动失败，请检查${log_dir}下的Ray日志。"
    exit 1
fi

# Only the head node runs the OpenAI-compatible API server; worker nodes
# merely contribute their NPUs through Ray.
if [ "$HCCL_IF_IP" == "$MASTER_IP" ]; then
	# Build the command as a bash array instead of an eval'd string:
	# with eval, values in MODEL_PATH/PORT/optional vars containing
	# spaces, globs, or shell metacharacters were re-parsed by the
	# shell (word-split, expanded, even executed). Array elements are
	# passed to python verbatim.
	cmd=(python -m vllm.entrypoints.openai.api_server
		--model="$MODEL_PATH"
		--trust-remote-code
		--distributed_executor_backend ray
		--disable-frontend-multiprocessing
		--port "$PORT")
	# Optional flags: only appended when the env var is non-empty;
	# FOO_BAR becomes --foo-bar.
	for var in SERVED_MODEL_NAME MAX_MODEL_LEN TENSOR_PARALLEL_SIZE PIPELINE_PARALLEL_SIZE; do
		if [ -n "${!var}" ]; then
			normalized_var=$(echo "${var}" | tr '_' '-' | tr '[:upper:]' '[:lower:]')
			cmd+=("--$normalized_var" "${!var}")
		fi
	done
	log_file="$log_dir/vllm_api_server.log"
	# Launch in the background; the redirection creates the log file
	# immediately, so the greps below never race a missing file.
	"${cmd[@]}" > "$log_file" 2>&1 &
	server_id=$!
	# Poll the log until the server reports readiness, logs a kill
	# signal, or the process dies.
	while true; do
		if grep -q "Started server process" "$log_file"; then
			echo "${MASTER_IP}:${PORT} 服务启动成功!"
			break
		elif grep -q "kill" "$log_file"; then
			echo "检测到 'kill' 信号，退出循环。"
			exit 1
		elif ! kill -0 "$server_id" 2>/dev/null; then
			echo "进程 ${server_id} 终止。请查看日志文件: $log_file"
			exit 1
		fi
		sleep 1
	done
fi