# knowledge-library/services/query_processor.py
import logging
from typing import Dict, List
from config import settings
from .vector_db import VectorDBManager
from .openrouter import OpenRouterClient

logger = logging.getLogger(__name__)


class QueryProcessor:
    """Answers questions by combining local vector retrieval with an LLM.

    The vector store supplies relevant context passages; the LLM (via
    OpenRouter) composes the final answer from that context.
    """

    def __init__(self, vector_db: "VectorDBManager", llm_client: "OpenRouterClient"):
        # Forward-reference annotations avoid evaluating project types
        # at class-definition time; behavior for callers is unchanged.
        self.vector_db = vector_db
        self.llm_client = llm_client

    async def hybrid_query(self, question: str, use_llm: bool = True, top_k: int = 5) -> Dict:
        """Run a hybrid (vector retrieval + optional LLM) query.

        Args:
            question: Natural-language question to answer.
            use_llm: When False, skip the LLM call and return only the
                retrieved references (zero cost).
            top_k: Number of passages to retrieve from the vector store.

        Returns:
            Dict with keys ``answer`` (str, or None in retrieval-only
            mode), ``references`` (list of retrieval hits) and ``cost``
            (float; 0.0 when the LLM is skipped or reports no cost).

        Raises:
            Exception: Any error from retrieval or the LLM call is
                logged (with traceback) and re-raised unchanged.
        """
        try:
            # Local vector retrieval.
            local_results = self.vector_db.search(
                query=question,
                top_k=top_k
            )

            # Retrieval-only mode: return hits without invoking the LLM.
            if not use_llm:
                return {
                    "answer": None,
                    "references": local_results,
                    "cost": 0.0
                }

            # Build the LLM prompt from the retrieved passages.
            # NOTE(review): assumes each hit is a dict with a 'content'
            # key — confirm against VectorDBManager.search.
            context = "\n".join(f"- {res['content']}" for res in local_results)
            prompt = f"""基于以下知识库内容：
{context}

请回答：{question}
"""

            # Ask OpenRouter for the final answer.
            llm_response = await self.llm_client.chat_completion(
                messages=[{"role": "user", "content": prompt}],
                model=settings.openrouter_model
            )

            return {
                "answer": llm_response["content"],
                "references": local_results,
                # Cost is optional in the client response; default to free.
                "cost": llm_response.get("cost", 0.0)
            }
        except Exception as e:
            # logger.exception captures the traceback (logger.error with an
            # f-string did not); lazy %-args defer formatting until needed.
            logger.exception("混合查询失败: %s", e)
            raise