
from typing import Optional

from langchain.chains import RetrievalQA
from langchain_milvus import Milvus
from langchain_ollama import OllamaEmbeddings
from langchain_ollama import OllamaLLM

class KnowledgeQuery:
    """Retrieval-augmented question answering over the ``policy`` Milvus collection.

    Wires an Ollama-hosted LLM and embedding model to a Milvus vector store
    and exposes a single QA entry point, :meth:`policy_search`.
    """

    def __init__(self):
        # Completion model used to synthesize the final answer.
        # Low temperature keeps answers close to the retrieved policy text.
        self.llm = OllamaLLM(
            base_url="http://192.168.7.3:11434",
            model="qwen2.5:14b",
            temperature=0.2
        )
        # Embedding model for query vectorization. Must match the model used
        # at ingestion time, or retrieval quality degrades silently.
        self.embedder = OllamaEmbeddings(
            model="bge-m3:latest",
            base_url="http://192.168.7.3:11434"
        )
        # NOTE(review): the database name ("ipp_air_general") is embedded in
        # the URI path — confirm this langchain_milvus version parses it;
        # some versions expect a separate "db_name" connection arg instead.
        self.vectorstore = Milvus(
            embedding_function=self.embedder,
            collection_name="policy",
            connection_args={"uri": "http://192.168.6.20:19530/ipp_air_general"},
            auto_id=True
        )

    def policy_search(self, question: str, filters: Optional[dict] = None):
        """Answer *question* using top-5 retrieval from the policy collection.

        Args:
            question: Natural-language query.
            filters: Optional metadata filter forwarded to the retriever.
                When falsy, the key is omitted entirely so an unfiltered
                search runs — an empty-dict filter is not guaranteed to
                mean "no filter" for every backend.

        Returns:
            The chain output dict produced by ``RetrievalQA`` (includes the
            generated answer under its result key).
        """
        # NOTE(review): some langchain_milvus versions filter via an "expr"
        # string rather than a "filter" dict — verify "filter" is honored
        # by this backend/version combination.
        search_kwargs: dict = {"k": 5}
        if filters:
            search_kwargs["filter"] = filters

        retriever = self.vectorstore.as_retriever(search_kwargs=search_kwargs)

        qa_chain = RetrievalQA.from_chain_type(
            retriever=retriever,
            llm=self.llm,
            chain_type="stuff"
        )

        # Explicit input mapping: "query" is RetrievalQA's documented input
        # key; passing a bare string relied on implicit single-input wrapping.
        return qa_chain.invoke({"query": question})
