'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: 2_study_retriever_router_impl.py
* @Time: 2025/9/15
* @All Rights Reserved by Brtc
'''
import dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
from typing_extensions import Literal

dotenv.load_dotenv()
class RouteQuery(BaseModel):
    """将用户查询映射到相关的数据源"""
    datasource:Literal["python_docs","js_docs", "golang_docs"] = Field(description="根据用户的问题,选择那个数据源最相关以回答他们的问题")

def choose_route(result:RouteQuery):
    if "python_docs" in result.datasource.lower():
        return "python_docs"
    elif "js_docs" in result.datasource.lower():
        return "js_docs"
    else:
        return "golang_docs"
#1、构建大预言并结构化输出
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
structed_llm = llm.with_structured_output(RouteQuery)
#2、创建路由逻辑
prompt = ChatPromptTemplate.from_messages([
    ("system","你是一个擅长将用户问题路由到适当数据源的专家。\n请根据问题涉及编程语言，将其路由到相关的数据"),
    ("human","{question}")
])
router ={"question":RunnablePassthrough()} | prompt | structed_llm | choose_route
#3、执行相关的提问。检测路由
question = """ 
为什么下面的代码不工作，请帮我检查下:
from langchain_core.prompts import ChatPromptTemplate
 
prompt = ChatPromptTemplate.from_messages(["human", "speak in {language}"])
prompt.invoke("中文")
"""
print(router.invoke(question))