# from langchain_core.prompts import ChatPromptTemplate
# from langchain_ollama.llms import OllamaLLM

# template = """Question: {question}

# Answer: Let's think step by step."""

# prompt = ChatPromptTemplate.from_template(template)

# model = OllamaLLM(model="qwen-1.5b:latest")

# chain = prompt | model

# answer = chain.invoke({"question": "What is LangChain?"})

# print(answer)



from langchain_core.prompts import ChatPromptTemplate
from langchain_ollama.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser

# One-shot QA demo: render the prompt, send it to a local Ollama chat model,
# and parse the reply down to a plain string.
template = """Question: {question}

回答: 请使用中文让我们一步一步思考."""

# Compose the whole pipeline in a single LCEL pipe expression
# (prompt -> chat model -> string parser) instead of naming each stage.
chain = (
    ChatPromptTemplate.from_template(template)
    | ChatOllama(model="qwen-1.5b:latest")
    | StrOutputParser()
)

# NOTE(review): the model tag "qwen-1.5b:latest" assumes a matching local
# Ollama model — confirm with `ollama list` if invocation fails.
answer = chain.invoke(
    {"question": "LLMChain(llm=chat, prompt=chat_prompt_template, verbose=verbose)可以转为langchain_ollama中的chain呢？"}
)

print(answer)