import configparser
from pathlib import Path

from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA

from knowledge_base_manager import KnowledgeBaseManager
from logger_setup import log

# Resolve all paths relative to this file (not the current working directory)
# so the script behaves the same no matter where it is launched from.
SCRIPT_DIR = Path(__file__).resolve().parent
CONFIG_PATH = SCRIPT_DIR / 'config.ini'

class QAAgent:
    """
    An agent that answers questions over the knowledge base using
    retrieval-augmented generation (RAG): relevant documents are retrieved
    from the vector store and stuffed into the prompt of the chat LLM.
    """

    def __init__(self):
        """Load config, connect to the knowledge base, and build the RAG chain.

        Raises:
            FileNotFoundError: if config.ini is missing next to this script.
        """
        self.config = configparser.ConfigParser()
        # ConfigParser.read() silently returns an empty list for a missing
        # file, which would only surface later as a confusing
        # NoSectionError — fail fast with a clear message instead.
        if not self.config.read(CONFIG_PATH):
            raise FileNotFoundError(f"Config file not found: {CONFIG_PATH}")

        # 1. Initialize the Knowledge Base to get access to the vector store
        log.info("Initializing Knowledge Base...")
        kb_manager = KnowledgeBaseManager()
        self.vector_store = kb_manager.vector_store
        log.info("Knowledge Base connection successful.")

        # 2. Initialize the LLM for generating answers
        self.llm = self._load_llm_from_profile()

        # 3. Build the RAG chain
        self.qa_chain = self._build_rag_chain()

    def _load_llm_from_profile(self):
        """Load the chat LLM from the active profile in the config.

        The [LLM] section's 'active_profile' value names another config
        section expected to contain 'model_name', 'api_key' and 'base_url'.

        Returns:
            ChatOpenAI: the configured chat model.

        Raises:
            configparser.NoSectionError / NoOptionError: if [LLM] or
                'active_profile' is missing from the config.
        """
        active_profile = self.config.get('LLM', 'active_profile')
        log.info(f"Loading LLM using active profile: '{active_profile}'")
        profile = self.config[active_profile]
        return ChatOpenAI(
            model=profile.get('model_name'),
            api_key=profile.get('api_key'),
            base_url=profile.get('base_url'),
            temperature=0.3,  # lower temperature for more factual answers
        )

    def _build_rag_chain(self):
        """Build the RetrievalQA chain over the vector store.

        Returns:
            RetrievalQA: a chain that retrieves the top-5 most similar
            chunks and feeds them to the LLM to produce an answer.
        """
        log.info("Building the RAG chain...")

        # Create a retriever from our vector store (top 5 matches per query)
        retriever = self.vector_store.as_retriever(search_kwargs={"k": 5})

        # This chain will find relevant documents and then use the LLM to answer
        qa_chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            chain_type="stuff",  # puts all retrieved text into a single prompt
            retriever=retriever,
            return_source_documents=True,  # we want to see where the answer came from
            verbose=True,  # shows the intermediate steps
        )
        log.info("RAG chain built successfully.")
        return qa_chain

    def ask(self, query: str):
        """Run *query* through the QA chain and print the answer and sources.

        Args:
            query: the natural-language question to answer.

        Returns:
            dict: the raw chain response, containing at least 'result'
            (the answer text) and 'source_documents'.
        """
        log.info(f"--- New Query: {query} ---")

        # RetrievalQA's documented input key is "query"; pass it explicitly
        # rather than relying on single-input string coercion.
        response = self.qa_chain.invoke({"query": query})

        print("\n---\n")
        print(f"[QUESTION]: {query}")
        print("\n[ANSWER]:")
        print(response['result'])
        print("\n[SOURCES]:")
        # Deduplicate sources while preserving retrieval order, and tolerate
        # documents whose metadata lacks a 'source' key (metadata['source']
        # would raise KeyError).
        seen = set()
        for doc in response['source_documents']:
            source = doc.metadata.get('source', '<unknown>')
            if source not in seen:
                seen.add(source)
                print(f"  - {source}")
        print("\n---")
        return response

if __name__ == "__main__":
    # Manual smoke test: build the agent and run one sample query against
    # the knowledge base.
    qa_agent = QAAgent()
    sample_question = "课程的核心知识点有哪些"
    qa_agent.ask(sample_question)

