# llm_engine/chatbot_service.py
from __future__ import annotations
import chromadb
from typing import Optional

from llama_index.core import Settings, VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.llms.zhipuai import ZhipuAI
from llama_index.core.response_synthesizers import get_response_synthesizer
from llama_index.core.schema import NodeWithScore
from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter

from llm_engine.config import (
    API_KEY,
    CHROMA_DB_PATH,
    EMBEDDING_MODEL_NAME,
    LLM_MODEL_NAME
)


class ChatbotService:
    """
    Chat bot service with department-level retrieval isolation.

    Vector recall is filtered on ``metadata.department`` at retrieval time,
    so documents of one department are never surfaced (nor shown to the LLM)
    for queries scoped to another department.
    """

    def __init__(
        self,
        chroma_path: str = CHROMA_DB_PATH,
        collection_name: str = "project_collection",
        api_key: str = API_KEY,
        embedding_model: str = EMBEDDING_MODEL_NAME,
        llm_model: str = LLM_MODEL_NAME,
        temperature: float = 0.3,
        top_k: int = 3,
        response_mode: str = "compact",
    ) -> None:
        """
        Args:
            chroma_path: Filesystem path of the persisted Chroma database.
            collection_name: Name of the Chroma collection to load.
            api_key: ZhipuAI API key used for both embedding and LLM calls.
            embedding_model: Embedding model name; must match the model used
                when the index was built, otherwise recall quality degrades.
            llm_model: Chat model name used for answer synthesis.
            temperature: LLM sampling temperature.
            top_k: Number of nodes to retrieve per query.
            response_mode: Response synthesizer mode, e.g. "compact",
                "refine" or "tree_summarize" (previously hard-coded).
        """
        self.top_k = top_k

        # 1) Embedding model (must be consistent with index-build time).
        #    NOTE(review): Settings is a process-wide singleton, so two
        #    services with different embedding models would clobber each other.
        Settings.embed_model = ZhipuAIEmbedding(api_key=api_key, model=embedding_model)

        # 2) LLM used for answer synthesis.
        self.llm = ZhipuAI(api_key=api_key, model=llm_model, temperature=temperature)

        # 3) Attach to the persisted Chroma store and load the index from it
        #    (read path only — nothing is re-ingested here).
        self._chroma_client = chromadb.PersistentClient(path=chroma_path)
        self._collection = self._chroma_client.get_or_create_collection(collection_name)
        vector_store = ChromaVectorStore(chroma_collection=self._collection)
        self.index = VectorStoreIndex.from_vector_store(vector_store=vector_store)

        # 4) Reusable response synthesizer.
        self.synthesizer = get_response_synthesizer(
            llm=self.llm,
            response_mode=response_mode,
        )

    def _build_department_filters(self, department: str) -> MetadataFilters:
        """Build an exact-match metadata filter for the given department."""
        return MetadataFilters(
            filters=[ExactMatchFilter(key="department", value=str(department))]
        )

    def _retrieve(
        self, query_text: str, department: str
    ) -> list[NodeWithScore]:
        """
        Run retrieval only (department filter applied) so callers can tell
        whether anything was hit before paying for LLM synthesis.
        """
        filters = self._build_department_filters(department)
        retriever = self.index.as_retriever(
            similarity_top_k=self.top_k,
            filters=filters,
        )
        return retriever.retrieve(query_text)

    def query(self, query_text: str, department: str) -> str:
        """
        Answer ``query_text`` using only documents of ``department``.

        Filtering happens at the retrieval stage, strictly enforcing
        "cross-department invisibility". If the department has no matching
        content at all, a friendly notice is returned instead of an answer.
        """
        # Retrieve first to confirm there is any context at all.
        nodes = self._retrieve(query_text, department)

        if not nodes:
            return (
                f"未在【{department}】部门的知识库中找到相关内容。"
                "如果你确认该信息存在，请检查该项目是否归属此部门，或联系管理员更新知识库。"
            )

        # Only synthesize an answer when context actually exists.
        answer = self.synthesizer.synthesize(
            query=query_text,
            nodes=nodes,
        )
        return str(answer)
    
if __name__ == "__main__":
    service = ChatbotService()
    # Same question against two departments: the first should answer from
    # its own knowledge base, the second should report no matching content.
    demo_cases = [
        ("业务部", "\n=== 模型回答正确部门 ==="),
        ("财务部", "\n=== 模型回答错误部门 ==="),
    ]
    try:
        for department, banner in demo_cases:
            reply = service.query("现在有哪些项目信息? 利润咋样?", department)
            print(banner)
            print(reply)
    except Exception as e:
        print(f"查询失败: {e}")