#!/usr/bin/env bash
# Cheat-sheet of commands for serving Qwen coder models locally.
# NOTE: every server command below runs in the foreground and binds a fixed
# port, so the commands are mutually exclusive — run ONE at a time rather
# than executing this file top to bottom.

# Legacy vLLM demo API server (not OpenAI-compatible):
# python -m vllm.entrypoints.api_server --model ~/.cache/modelscope/hub/models/Qwen/Qwen2.5-Coder-3B-Instruct/ --gpu-memory-utilization=0.9 --max-num-seqs 16 --port 11435

# vLLM OpenAI-compatible server.
# https://vllm.hyper.ai/docs/serving/openai-compatible-server
vllm serve ~/.cache/modelscope/hub/models/Qwen/Qwen2.5-Coder-3B-Instruct/ \
  --served-model-name qwen2.5-coder:3b-instruct \
  --gpu-memory-utilization=0.8 \
  --max-num-seqs 16 \
  --port 11435
# Alternative: 14B GPTQ-Int4 model. It binds the same port 11435 and
# `vllm serve` blocks in the foreground, so it can never run while the
# command above is alive — kept commented out; enable one or the other.
# NOTE(review): the 14B model is served under the alias
# "qwen2.5-coder:7b-instruct" — presumably a deliberate drop-in name for
# clients already configured with it; confirm, otherwise rename the alias
# to qwen2.5-coder:14b-instruct.
# vllm serve ~/.cache/modelscope/hub/models/Qwen/Qwen2.5-Coder-14B-Instruct-GPTQ-Int4 \
#   --served-model-name qwen2.5-coder:7b-instruct \
#   --gpu-memory-utilization=0.8 \
#   --max-num-seqs 128 \
#   --port 11435

# Serving a quantized (GPTQ-Int4) model with vLLM.
# Download it first with:
#   modelscope download --model Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int4
vllm serve ~/.cache/modelscope/hub/models/Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int4/ \
  --served-model-name qwen2.5-coder:3b-instruct \
  --gpu-memory-utilization=0.6 \
  --max-num-seqs 24 \
  --port 11435

# llama.cpp server hosting a reranker model (bce-reranker-base_v1, Q8_0),
# exposing a /rerank endpoint on port 11436.
# Alternative model path on a Featurize instance — same port, and
# llama-server blocks in the foreground, so only one instance can run;
# kept commented out (the modelscope cache path below matches the rest
# of this file):
# ./llama-server -m /home/featurize/work/reranker/bce-reranker-base_v1-GGUF/bce-reranker-base_v1-Q8_0.gguf --cont-batching --reranking --host 0.0.0.0 --port 11436 -np 1 --ctx-size 4096 -ngl 9999
./llama-server \
  -m ~/.cache/modelscope/hub/models/gpustack/bce-reranker-base_v1-GGUF/bce-reranker-base_v1-Q8_0.gguf \
  --cont-batching --reranking \
  --host 0.0.0.0 --port 11436 \
  -np 1 --ctx-size 4096 -ngl 9999
# Quick curl smoke test of the rerank endpoint.
# "home" is a local hostname — adjust to wherever llama-server runs.
# -d implies a POST request, so no explicit -X POST is needed; the scheme
# is spelled out instead of relying on curl's scheme guessing.
curl \
  -H "Content-Type: application/json" \
  -d '{
    "query": "水草有没有根",
    "documents": ["你的头怎么尖尖的", "那我问你水草长在水里还是土里", "水里面的植物一般有根"]
  }' \
  http://home:11436/rerank
