from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_ollama import ChatOllama

# Model configuration for the local Ollama chat model.
model = 'llama3'
temperature = 0.2  # low temperature → more deterministic output

# Build the chat model once; both chains below share this instance.
# NOTE: the original code called .bind() with no arguments, which only wraps
# the model in a no-op RunnableBinding — removed, behavior is unchanged.
llm = ChatOllama(
    model=model,
    temperature=temperature
)

# Two prompt branches sharing the same model: one tells a joke, one writes a poem.
chain_joke = (
    ChatPromptTemplate.from_template("请讲一个关于 {topic} 的笑话。请用中文回答。100字以内")
    | llm
)
chain_poem = (
    ChatPromptTemplate.from_template("请写一首关于 {topic} 的诗。请用中文回答。100字以内")
    | llm
)

# Fan the same input out to both branches; results are keyed by branch name.
runnable = RunnableParallel({"joke": chain_joke, "poem": chain_poem})

# Stream the results: RunnableParallel.stream yields dicts keyed by branch
# name ("joke"/"poem"), each value being a message chunk with a .content str.
for output in runnable.stream({"topic": "dragon"}):
    # .get() + getattr replaces the old `in`/`hasattr` double-checks; the
    # original try/except KeyError/AttributeError was unreachable because
    # those guards already prevented both exceptions.
    joke_text = getattr(output.get("joke"), "content", "")
    if joke_text:
        print("Joke:", joke_text)

    # Poem output intentionally disabled (as in the original); re-enable with:
    # poem_text = getattr(output.get("poem"), "content", "")
    # if poem_text:
    #     print("Poem:", poem_text)

# One-shot (non-streaming) alternative:
# run the chain once and print the complete results
# outputparser = runnable.invoke({"topic": "woman"})
# print("Joke:", outputparser["joke"].content)
# print("Poem:", outputparser["poem"].content)
