import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import ChatOpenAI

# Build one shared chat model pointed at the DashScope OpenAI-compatible endpoint.
# SECURITY: the API key must never be hard-coded in source — read it from the
# environment instead (export DASHSCOPE_API_KEY=... before running).
llm = ChatOpenAI(
    api_key=os.environ.get("DASHSCOPE_API_KEY", ""),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    # Swap the model name as needed; the available models are listed at
    # https://help.aliyun.com/zh/model-studio/getting-started/models
    model="llama-4-scout-17b-16e-instruct",
    # other params...
)

# Two independent prompts over the same {topic} input variable.
prompt1 = ChatPromptTemplate.from_template("请讲一个关于{topic}的笑话")
prompt2 = ChatPromptTemplate.from_template("请写两句关于{topic}的诗歌")

# Parse the model's chat message down to a plain string. (Previously this
# parser was created but never attached to either chain — dead code.)
output = StrOutputParser()
chain1 = prompt1 | llm | output
chain2 = prompt2 | llm | output

# RunnableParallel runs both chains concurrently, which is faster than
# invoking them one after the other; the result is a dict with keys
# "joke" and "poem".
combined = RunnableParallel(joke=chain1, poem=chain2)
print(combined.invoke({"topic": "猫"}))