import requests

from ollama import  Client

def generate_with_client(prompt, model="qwen2.5:7b"):
    """Query a local Ollama server via the official client library.

    Replaces the previous module-level `Client().generate(...)` call, which
    fired a network request (and could raise) as a side effect of merely
    importing this module, with its result discarded.

    Args:
        prompt: The user prompt to send to the model.
        model: Ollama model tag to use (default: "qwen2.5:7b").

    Returns:
        The response object produced by `Client.generate`.
    """
    client = Client()
    return client.generate(model, prompt)



def send_prompt_to_ollama(prompt, model="qwen2.5:7b", timeout=60):
    """Send a prompt to a local Ollama server and return the parsed JSON reply.

    Args:
        prompt: The user prompt to send to the model.
        model: Ollama model tag to use (default: "qwen2.5:7b").
        timeout: Seconds to wait for the HTTP response before giving up.

    Returns:
        The decoded JSON response body as a dict.

    Raises:
        requests.HTTPError: If the server returns a non-2xx status.
        requests.Timeout: If the server does not respond within `timeout`.
    """
    url = "http://localhost:11434/api/generate"  # Ollama's default port is 11434
    headers = {"Content-Type": "application/json"}
    data = {
        "prompt": prompt,
        "model": model,
        # /api/generate streams NDJSON by default; without this flag the body
        # contains many JSON objects and response.json() raises JSONDecodeError.
        "stream": False,
    }
    # A timeout is required: requests has none by default and would hang
    # indefinitely if the server is unresponsive.
    response = requests.post(url, json=data, headers=headers, timeout=timeout)
    response.raise_for_status()  # surface HTTP errors instead of parsing an error page
    return response.json()


if __name__ == "__main__":
    # 示例：发送一个简单的提示给Qwen模型
    prompt = "什么是大模型？"
    result = send_prompt_to_ollama(prompt)
    print(result)
