import requests
from config import oneapi_api_key,oneapi_url
from openai import OpenAI
def request_http_Chat(model: str, query: str, temperature=0.3, url=oneapi_url, api_key=oneapi_api_key, timeout=60) -> str:
    """
    Send a single-turn chat completion request over raw HTTP (OpenAI-compatible API).

    :param model: Model identifier to request, e.g. a Kimi/Moonshot model name.
    :param query: The user's query string (sent as a single "user" message).
    :param temperature: Sampling temperature controlling answer randomness (0 to 1).
    :param url: Chat-completions endpoint URL (defaults to the configured one-api URL).
    :param api_key: Bearer token for the API (defaults to the configured one-api key).
    :param timeout: Request timeout in seconds; prevents the call from hanging forever.
    :return: The assistant's reply text, or an "An error occurred: ..." string on failure.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    data = {
        "model": model,
        "messages": [
            {"role": "user", "content": query}
        ],
        "temperature": temperature,
        "stream": False  # non-streaming: response arrives as one JSON body
    }
    try:
        response = requests.post(url, json=data, headers=headers, timeout=timeout)
        response.raise_for_status()  # surface HTTP-level errors (4xx/5xx) as exceptions
        completion = response.json()
        return completion['choices'][0]['message']['content']
    except Exception as e:
        # Callers rely on getting a string back rather than an exception,
        # so keep the broad catch and report the error in the return value.
        return f"An error occurred: {e}"


def get_xinference_client(url: str = "http://10.106.153.12:9997/v1", api_key: str = "EMPTY"):
    """
    Build an OpenAI-compatible client pointed at an Xinference server.

    :param url: Base URL of the Xinference OpenAI-compatible endpoint
                (defaults to the previously hard-coded internal server).
    :param api_key: API key; Xinference does not require one, so "EMPTY" works.
    :return: A configured ``OpenAI`` client instance.
    """
    client = OpenAI(
        api_key=api_key,
        base_url=url,
    )
    return client

def chat_openai(client, messages, model="qwen1half-32b-chat", temperature=0.3):
    """
    Run one chat completion through an OpenAI-compatible client and return the text.

    :param client: An OpenAI-compatible client (e.g. from ``get_xinference_client``).
    :param messages: Chat history as a list of ``{"role": ..., "content": ...}`` dicts.
    :param model: Model name to query.
    :param temperature: Sampling temperature (was hard-coded to 0.3; now configurable
                        with the same default, so existing callers are unaffected).
    :return: The content string of the first returned choice.
    """
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=temperature,
    )
    return completion.choices[0].message.content

def sample_query():
    """Return an example user message dict for quick manual testing."""
    message = {"role": "user", "content": '你好'}
    return message