import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableBranch, RunnableLambda
from langchain_openai import ChatOpenAI

#定义模型
# Shared LLM for every routing chain (DashScope's OpenAI-compatible endpoint).
# SECURITY FIX: the API key was hardcoded in source (a leaked secret) — read it
# from the environment instead. Export DASHSCOPE_API_KEY before running.
model = ChatOpenAI(
    model="qwen-plus",  # `model` is the current kwarg; `model_name` is a deprecated alias
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    temperature=0.7,
)

#技术支持链
tech_prompt = ChatPromptTemplate.from_template(
    "你是一名技术支持专家,请回答一下技术问题：{input}"
)
tech_chain = tech_prompt | model | StrOutputParser()

#财务链
billing_prompt = ChatPromptTemplate.from_template(
    "你是一名财务专员,请处理以下账单问题：{input}"
)
billing_prompt = billing_prompt | model | StrOutputParser()

#默认通用链
default_prompt = ChatPromptTemplate.from_template(
    "你是一名客服,请回答一下技术问题：{input}"
)
default_chain = default_prompt | model | StrOutputParser()


def is_tech_question(payload: dict) -> bool:
    """Return True if the "input" text contains a tech keyword (技术/故障).

    `payload` is the routing dict produced upstream; a missing "input" key
    is treated as an empty string (returns False).
    """
    # Renamed the parameter from `input`, which shadowed the builtin.
    text = payload.get("input", "")
    return "技术" in text or "故障" in text


def is_billing_question(payload: dict) -> bool:
    """Return True if the "input" text contains a billing keyword (账单/支付).

    `payload` is the routing dict produced upstream; a missing "input" key
    is treated as an empty string (returns False).
    """
    # Renamed the parameter from `input`, which shadowed the builtin.
    text = payload.get("input", "")
    return "账单" in text or "支付" in text

# Keyword router: the first predicate that returns True wins; the final
# positional argument is the fallback used when nothing matches.
branch = RunnableBranch(
    (is_tech_question, tech_chain),
    # NOTE: `billing_prompt` was rebound above to the composed billing chain
    # (prompt | model | parser), so this routes to a full chain, not a prompt.
    (is_billing_question, billing_prompt),
    default_chain,
)

# Earlier draft, superseded by the logging version below:
#   full_chain = RunnableLambda(lambda x: {"input": x}) | branch

def log_decision(payload):
    """Print the payload entering the router (for debugging), then pass it through unchanged."""
    print(f"路由检查输入:{payload}")
    return payload

# Tap the pipeline with a debug logger before the keyword router runs.
log_chain_branch = RunnableLambda(log_decision) | branch
# Public entry point: wraps the raw question string into the {"input": ...}
# dict that the routing predicates and prompts expect.
full_chain = RunnableLambda(lambda question: {"input": question}) | log_chain_branch

# Test 1: a technical question (contains the "技术"/"故障" keywords -> tech_chain).
tech_response = full_chain.invoke("我的账号登录失败,提示技术故障")
print(tech_response)

# Test 2: a general question (no keyword match -> falls through to default_chain).
# (Original comment said "technical question" here — copy-paste error.)
default_response = full_chain.invoke("你们公司的地址在哪里")
print(default_response)