from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_ollama import ChatOllama

# --- Configuration -----------------------------------------------------------
MODEL_NAME = 'llama3.1'  # Ollama model tag to run locally
temperature = 0.5        # moderate sampling randomness for joke generation

# Prompt: the system message fixes the persona and the answer language,
# the human message supplies the joke topic. Both {lang} and {topic} are
# filled in at invoke time from the input dict.
prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template("you are a humorous joker,your response is always Use {lang} language."),
        HumanMessagePromptTemplate.from_template("please tell me a joke about {topic}"),
    ]
)

output_parser = StrOutputParser()
model = ChatOllama(
    model=MODEL_NAME,
    temperature=temperature,
)

# Build the chain.
# FIX: the original prepended
#     {"topic": RunnablePassthrough(), "lang": RunnablePassthrough()}
# to the chain. RunnablePassthrough forwards its *entire* input unchanged,
# so with a dict input BOTH "topic" and "lang" were bound to the whole
# {"topic": ..., "lang": ...} dict, and the prompt rendered with the dict's
# repr instead of the individual values. Since invoke() already receives a
# dict with exactly the variables the prompt expects, no mapping step is
# needed — feed the prompt directly.
chain = prompt | model | output_parser

# Run the chain: ask for a chicken joke, answered in Chinese.
output = chain.invoke({"topic":"chicken","lang":"Chinese"})

print(output)