#!/bin/bash
# Raise vLLM's per-iteration engine timeout (seconds) and RPC timeout
# (milliseconds) so long generations on a 32B model are not aborted mid-request.
export VLLM_ENGINE_ITERATION_TIMEOUT_S=36000
export VLLM_RPC_TIMEOUT=36000000
# Presumably meant to force CUDA-graph execution; recent vLLM versions control
# this via the enforce_eager engine argument rather than an environment variable.
export VLLM_ENFORCE_CUDA_GRAPH=1

# Start a local xinference server (supervisor + worker) in the background,
# pulling models from ModelScope and caching under /xinference/xinference_cache.
XINFERENCE_MODEL_SRC=modelscope XINFERENCE_HOME=/xinference/xinference_cache xinference-local -H 0.0.0.0 &
XINF_PID=$!
# Block until the xinference REST endpoint (default port 9997) answers.
until curl -s "http://localhost:9997" > /dev/null; do
  sleep 1
done
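
# A bounded variant of the readiness wait (sketch; the 120-attempt cap is an
# illustrative value, not from the original setup). The loop above spins
# forever if the server never comes up; a capped version would look like:
#
#   for _ in $(seq 1 120); do
#     curl -s "http://localhost:9997" > /dev/null && break
#     sleep 1
#   done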

# Launch DeepSeek-R1-Distill-Qwen-32B on the vLLM engine with 4 replicas.
# The underscore-style flags (--enable_prefix_caching, --enable_chunked_prefill,
# --max_model_len) are forwarded by xinference to vLLM as engine kwargs;
# $((1024*32)) gives a 32,768-token context window.
xinference launch \
  --model-engine vllm \
  --model-name deepseek-r1-distill-qwen \
  --size-in-billions 32 \
  --model-format pytorch \
  --model_path /mnt/nvme1n1/DeepSeek-R1-Distill-Qwen-32B/ \
  --n-gpu 4 \
  --replica 4 \
  --enable_prefix_caching True \
  --enable_chunked_prefill True \
  --max_model_len $((1024*32)) \
  --trust-remote-code 1
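
# Sanity check (sketch): xinference exposes an OpenAI-compatible REST API on
# the same port, so listing /v1/models should show the replicas just launched.
curl -s "http://localhost:9997/v1/models"; echo

# Example query (sketch, left commented out so startup stays side-effect free);
# the model is addressed by the --model-name used above:
# curl -s "http://localhost:9997/v1/chat/completions" \
#   -H "Content-Type: application/json" \
#   -d '{"model": "deepseek-r1-distill-qwen", "messages": [{"role": "user", "content": "Hello"}]}'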

# Keep the script in the foreground (e.g. as a container entrypoint) for as
# long as the background server lives. Capturing $! here would still point at
# the backgrounded server from above, since the launch command ran in the
# foreground; waiting on the explicitly captured XINF_PID makes that clear.
wait "$XINF_PID"