# from langchain_openai import OpenAI
from langchain_ollama import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Demo: translate a short text with a local Ollama chat model using an
# LCEL pipeline (prompt template -> chat model -> string output parser).
model = ChatOllama(model="glm4:9b")

# System prompt; the {language} placeholder is filled in at invocation time.
system_message = "Translate the following into {language}:"

prompt_template = ChatPromptTemplate.from_messages([
    ("system", system_message),
    ("user", "{text}"),
])

# Extracts the plain-text content from the model's chat message response.
parser = StrOutputParser()

# Compose the chain with the LCEL pipe operator: formatting, model call,
# and output parsing happen in a single invoke().
chain = prompt_template | model | parser
result = chain.invoke({
    "language": "italian",
    "text": "hi",
})
print(result)
