import json

import requests


def ollama_qa(prompt, model="gemma3"):
    """
    Query the local Ollama generate API (non-streaming).

    :param prompt: the user's question
    :param model: model name to use, default "gemma3"
    :return: the answer text, or an error-marker string on failure
    """
    url = "http://localhost:11434/api/generate"
    payload = {"model": model, "prompt": prompt, "stream": False}
    try:
        resp = requests.post(url, json=payload, timeout=60)
        resp.raise_for_status()
        data = resp.json()
        # In non-streaming mode "response" carries the full generated text.
        return data.get("response", "")
    except (requests.RequestException, ValueError) as e:
        # RequestException covers connection/timeout/HTTP-status errors;
        # ValueError covers a malformed JSON body from resp.json().
        # Deliberately broad enough for best-effort use, but no longer
        # swallows programming errors (NameError, TypeError, ...).
        return f"[Ollama接口调用失败] {e}"


def ollama_qa_stream(prompt, model="gemma3"):
    """
    Query the local Ollama generate API with streaming output.

    :param prompt: the user's question
    :param model: model name to use, default "gemma3"
    :yield: incremental chunks of the generated answer; on transport
        failure a single error-marker string is yielded instead
    """
    url = "http://localhost:11434/api/generate"
    payload = {"model": model, "prompt": prompt, "stream": True}
    try:
        with requests.post(url, json=payload, stream=True, timeout=60) as resp:
            resp.raise_for_status()
            # Ollama streams one JSON object per line.
            for line in resp.iter_lines():
                if not line:
                    continue
                try:
                    # json.loads accepts bytes directly; no manual decode needed.
                    obj = json.loads(line)
                except ValueError:
                    # Skip malformed or partial lines (best-effort streaming);
                    # ValueError covers JSONDecodeError and UnicodeDecodeError.
                    continue
                if "response" in obj:
                    yield obj["response"]
    except requests.RequestException as e:
        yield f"[Ollama接口调用失败] {e}"


def _demo():
    """Manual smoke test against a locally running Ollama server."""
    question = "鲁迅和周树人什么关系?"
    print("--- 非流式 ---")
    # answer = ollama_qa(question)
    # print("Ollama回答：", answer)
    print("--- 流式 ---")
    for part in ollama_qa_stream(question):
        print(part, end="", flush=True)
    print()


# Example usage
if __name__ == "__main__":
    _demo()
