from langchain_core.runnables import RunnableParallel
import os
from langchain.chat_models import init_chat_model
from langchain_core.prompts import PromptTemplate
from dotenv import load_dotenv
load_dotenv()

# Chat model served through SiliconFlow's OpenAI-compatible endpoint.
# The key is read from the OPENAI_API_KEY env var (your SiliconFlow API key,
# loaded from .env by load_dotenv above).
_api_key = os.environ.get("OPENAI_API_KEY")
if not _api_key:
    # Fail fast: without this check a missing key is passed as None and only
    # surfaces as an opaque auth error when the chain is invoked.
    raise RuntimeError(
        "OPENAIAPI key missing: set OPENAI_API_KEY in your environment or .env file"
        if False else
        "OPENAI_API_KEY is not set; put your SiliconFlow API key in .env"
    )

model = init_chat_model(
    model="Qwen/Qwen3-8B",
    model_provider="openai",
    base_url="https://api.siliconflow.cn/v1/",
    api_key=_api_key,
)

# Build the two independent pipelines that will later run in parallel:
# each one formats the shared input into its own prompt, then calls the model.
_summary_prompt = PromptTemplate.from_template("总结：{text}")
_keyword_prompt = PromptTemplate.from_template("提取关键词：{text}")

summary_chain = _summary_prompt | model   # summarization pipeline
keyword_chain = _keyword_prompt | model   # keyword-extraction pipeline

# Fan the same input out to both chains concurrently; the result is a dict
# whose keys mirror the mapping given here.
parallel_chain = RunnableParallel(
    {
        "summary": summary_chain,    # summarization result
        "keywords": keyword_chain,   # keyword-extraction result
    }
)

# Run it: the single input dict is delivered to both chains at once.
payload = {"text": "LangChain是一个LLM应用框架..."}
result = parallel_chain.invoke(payload)
# Prints a dict shaped like {"summary": ..., "keywords": ...}
print(result)