from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langgraph.graph import Graph

class AdaptiveRAGPipeline:
    """Minimal Adaptive RAG pipeline: forwards a question to a chat model.

    Extension points (retrieval -> generation -> adaptive refinement) are
    intended to be wired into ``self.graph``; currently it holds no nodes.
    """

    def __init__(self, api_key: str, model_name: str) -> None:
        """Create the pipeline.

        Args:
            api_key: OpenAI API key passed through to the chat model.
            model_name: Name of the chat model to use (e.g. ``"gpt-4o"``).
        """
        self.llm = ChatOpenAI(openai_api_key=api_key, model=model_name)
        # Placeholder graph; more node logic (retriever, grader, generator,
        # ...) can be added here. Document / vector-store loading would also
        # go here.
        self.graph = Graph()

    def query(self, question: str) -> str:
        """Return the model's answer to *question*.

        Simplified Adaptive RAG implementation: currently a single LLM call.
        Can be extended here: retrieve -> generate -> adaptively adjust.

        Args:
            question: Natural-language question to answer.

        Returns:
            The model's reply text.
        """
        messages = [HumanMessage(content=question)]
        # Calling the model directly (``self.llm(messages)``) uses the
        # deprecated ``__call__`` interface; ``invoke`` is the supported
        # Runnable entry point and returns the same AIMessage.
        response = self.llm.invoke(messages)
        return response.content
