import asyncio

import requests
# # 调用 vLLM API
# class vllmConfig():
#     VLLM_HOST="192.168.10.3"
#     VLLM_PORT="8000"
# async def query_vllm(prompt,VLLM_HOST,VLLM_PORT):
#     url = f"http://{VLLM_HOST}:{VLLM_PORT}/predict"
#     payload = {
#         "prompt": prompt,
#         "max_tokens": 512,
#         "temperature": 1e-5
#     }
#     resp = requests.post(url, json=payload)
#     if resp.status_code == 200:
#         return resp.json().get("text", "")
#     else:
#         raise RuntimeError(f"vLLM API error: {resp.status_code} {resp.text}")



async def query_vllm(prompt,VLLM_BASE_URL,VLLM_API_KEY,VLLM_MODEL,temperature=1e-5, max_tokens=1024,strict=False):
    """Query a vLLM OpenAI-compatible ``/chat/completions`` endpoint.

    Args:
        prompt: User message, sent as the single "user" turn after a generic
            system prompt.
        VLLM_BASE_URL: Base URL of the vLLM server, e.g. ``http://host:8000/v1``
            (``/chat/completions`` is appended here).
        VLLM_API_KEY: Bearer token placed in the ``Authorization`` header.
        VLLM_MODEL: Model name passed in the request payload.
        temperature: Sampling temperature; near-zero default for
            near-deterministic output.
        max_tokens: Upper bound on completion length.
        strict: If True, collapse the reply to exactly one of the two fixed
            labels "和问题相关" / "和问题不相关" (relevant / not relevant).

    Returns:
        The model's reply text (stripped). With ``strict=True``, exactly
        "和问题相关" or "和问题不相关". On any request failure the sentinel
        string "调用vLLM失败" is returned instead of raising, matching
        existing callers.
    """
    url = f"{VLLM_BASE_URL}/chat/completions"
    # Do NOT log VLLM_API_KEY: it is a secret and must not appear in output.
    headers = {
        "Authorization": f"Bearer {VLLM_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": VLLM_MODEL,
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        "temperature": temperature,
        "max_tokens": max_tokens
    }
    try:
        # requests.post is blocking; run it in a worker thread so the
        # asyncio event loop stays responsive while we wait on the network.
        response = await asyncio.to_thread(
            requests.post, url, headers=headers, json=payload, timeout=60
        )
        response.raise_for_status()
        data = response.json()
        # Defensive navigation: missing keys yield "" rather than KeyError.
        content = data.get("choices", [{}])[0].get("message", {}).get("content", "").strip()
        if strict:
            # Keep only '和问题相关' or '和问题不相关'.
            if "和问题相关" in content:
                return "和问题相关"
            else:
                return "和问题不相关"
        return content
    except requests.exceptions.RequestException as e:
        # Deliberate best-effort: report the failure and return a sentinel
        # string instead of propagating, as callers expect.
        print("VLLM调用异常:", e)
        return "调用vLLM失败"