from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from ChatGLM_new import tongyi_llm

# Prompt asking the model to tell a joke about {topic}
# (template text is Chinese: "tell me a joke about {topic}").
prompt = ChatPromptTemplate.from_template("跟我讲个笑话 {topic}")

# Basic LCEL pipeline: prompt -> LLM -> plain string output.
chain = prompt | tongyi_llm | StrOutputParser()

# Example invocation (topic: "bear"), kept disabled to avoid an LLM call on import:
#print(chain.invoke({"topic": "熊"}))

# Second-stage prompt: asks whether {joke} is a funny joke
# (template text is Chinese: "Is this a funny joke? {joke}").
analysis_prompt = ChatPromptTemplate.from_template("这是一个有趣的笑话吗? {joke}")

# Two-stage composition: the first chain's string output is routed into the
# "joke" variable of analysis_prompt. The dict {"joke": chain} is coerced by
# LCEL into a runnable mapping step (RunnableParallel) — see LangChain docs.
composed_chain = {"joke": chain} | analysis_prompt | tongyi_llm | StrOutputParser()

# Example invocation (topic: "bear"), disabled to avoid an LLM call on import:
#print(composed_chain.invoke({"topic": "熊"}))


# Same two-stage composition as `composed_chain`, but the dict-wrapping step
# is written as an explicit function placed in the pipe. LCEL coerces a plain
# callable used with `|` into a RunnableLambda, just as it does a lambda.
def _to_joke_input(joke_text):
    """Wrap the generated joke string to match analysis_prompt's variables."""
    return {"joke": joke_text}


composed_chain_with_lambda = (
    chain
    | _to_joke_input
    | analysis_prompt
    | tongyi_llm
    | StrOutputParser()
)

# Live demo call (topic: "beet") — performs a real LLM request.
print(composed_chain_with_lambda.invoke({"topic": "甜菜"}))
