from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.llms import OllamaLLM

# Ollama-served local model; temperature kept low for more deterministic output.
# NOTE(review): `streaming=True` is forwarded to OllamaLLM as a keyword — verify
# that langchain_ollama accepts it; `chain.stream()` below streams regardless.
model = OllamaLLM(
    model="deepseek-r1:1.5b",
    temperature=0.1,
    streaming=True  # original intent: enable streamed token delivery
)

# Prompt template: wraps the user's question and nudges the model toward
# step-by-step (chain-of-thought style) reasoning.
template = """Question: {question}

Answer: Let's think step by step."""
prompt = ChatPromptTemplate.from_template(template)

# Runnable pipeline: format the prompt, then invoke the model.
chain = prompt | model

# 使用流式方式获取并打印结果
def stream_response(question: str) -> None:
    """Stream the model's answer for *question* to stdout, chunk by chunk.

    Prints each chunk as soon as it arrives (flushing stdout) so the
    response appears incrementally rather than all at once.
    """
    print("正在获取回答...\n")
    pieces = iter(chain.stream({"question": question}))
    while True:
        try:
            piece = next(pieces)
        except StopIteration:
            break
        # Print immediately and flush so the chunk is visible right away.
        print(piece, end="", flush=True)
    print("\n\n回答结束")

# Demo: stream one answer. Guarded so importing this module does not trigger
# a network call to the Ollama server — the request only runs as a script.
if __name__ == "__main__":
    stream_response("What is LangChain?")
