import os

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import ChatOpenAI

# Initialize the chat model against DashScope's OpenAI-compatible endpoint.
# SECURITY: never commit API keys to source control — the key is read from the
# DASHSCOPE_API_KEY environment variable instead (raises KeyError early if unset,
# which is preferable to shipping a hard-coded credential).
model = ChatOpenAI(
    model="qwen-plus",  # `model=` is the current parameter; `model_name=` is a deprecated alias
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.environ["DASHSCOPE_API_KEY"],
    temperature=0.7,
)

# Output parser: extracts the first JSON object/array from the model's text
# reply and returns it as Python data (dict/list).
parser = JsonOutputParser()

# Prompt: list {num} attractions in {city}, answering in the JSON shape shown.
# Fix: the original example used mismatched full-width closing quotes ("编号“),
# which made the sample JSON malformed and could mislead the model into emitting
# output that JsonOutputParser cannot parse.
# Note: doubled braces {{ }} are literal braces in a LangChain template;
# {city} / {num} are the template variables.
prompt_attractions = ChatPromptTemplate.from_template("""
列出{city}的{num}个景点。返回json格式:
{{
    "num":"编号",
    "city":"城市",
    "introduce":"景点介绍"
}}
"""
)

# Prompt: list {num} books related to {city}, answering in the JSON shape shown.
# Fix: replaced the mismatched full-width closing quotes ("编号“) in the example
# with ASCII quotes so the sample is valid JSON — keeps the model's output
# consistent with what JsonOutputParser expects.
prompt_books = ChatPromptTemplate.from_template("""
列出{city}相关的{num}本书。返回json格式:
{{
    "num":"编号",
    "city":"城市",
    "introduce":"书本介绍"
}}
"""
)

# Build two independent pipelines: prompt -> LLM -> JSON parsing.
chain1 = prompt_attractions | model | parser
chain2 = prompt_books | model | parser

# RunnableParallel fans a single input out to both branches concurrently and
# collects their results into one dict keyed by branch name
# ({"attractions": ..., "books": ...}).
chain = RunnableParallel(
    attractions=chain1,
    books=chain2,
)

result = chain.invoke({"city": "北京", "num": 3})
print(result)