import requests

def ollama_completion(prompt, model="deepseek", max_tokens=100, temperature=0.7):
    """Send a single non-streaming completion request to a local Ollama server.

    Args:
        prompt: Text prompt to send to the model.
        model: Ollama model tag (e.g. "deepseek-r1:7b").
        max_tokens: Maximum number of tokens to generate. Mapped to Ollama's
            "num_predict" option (the name Ollama actually recognizes).
        temperature: Sampling temperature forwarded to the model.

    Returns:
        The generated text from the "response" field of the JSON reply.

    Raises:
        Exception: If the server replies with a non-200 status code.
        requests.exceptions.RequestException: On connection failure or timeout.
    """
    # Local Ollama API endpoint for one-shot generation.
    OLLAMA_URL = "http://localhost:11434/api/generate"

    # Build the request payload.
    data = {
        "model": model,
        "prompt": prompt,
        "options": {
            # BUG FIX: Ollama's generate API names this option "num_predict";
            # an option called "max_tokens" is silently ignored, so the
            # max_tokens parameter previously had no effect.
            "num_predict": max_tokens,
            "temperature": temperature
        },
        # Request a single JSON object instead of a stream of chunks.
        "stream": False
    }

    # BUG FIX: without a timeout, requests.post can hang forever if the
    # server is down or the model is still loading. Use a short connect
    # timeout and a generous read timeout (generation can be slow).
    response = requests.post(OLLAMA_URL, json=data, timeout=(5, 300))

    # Surface non-200 responses with the status and body for debugging.
    if response.status_code == 200:
        result = response.json()
        return result["response"]
    else:
        raise Exception(f"请求失败: {response.status_code}, {response.text}")

# Example invocation — guarded so that importing this module does not
# trigger a network request to the local Ollama server as a side effect.
if __name__ == "__main__":
    prompt = "你好，Ollama！"
    response = ollama_completion(prompt, model="deepseek-r1:7b")
    print("模型输出:", response)